jackkuo committed on
Commit 923903f · verified · 1 Parent(s): 335eef9

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff

Files changed (50)
  1. .gitattributes +5 -0
  2. 4tFIT4oBgHgl3EQf7Ctv/content/tmp_files/2301.11396v1.pdf.txt +1650 -0
  3. 4tFIT4oBgHgl3EQf7Ctv/content/tmp_files/load_file.txt +0 -0
  4. 89AzT4oBgHgl3EQf-_4J/content/tmp_files/2301.01940v1.pdf.txt +943 -0
  5. 89AzT4oBgHgl3EQf-_4J/content/tmp_files/load_file.txt +0 -0
  6. 99E0T4oBgHgl3EQfwwHR/content/tmp_files/2301.02638v1.pdf.txt +0 -0
  7. 99E0T4oBgHgl3EQfwwHR/content/tmp_files/load_file.txt +0 -0
  8. ANE4T4oBgHgl3EQf4w6U/content/tmp_files/2301.05316v1.pdf.txt +889 -0
  9. ANE4T4oBgHgl3EQf4w6U/content/tmp_files/load_file.txt +0 -0
  10. BtE0T4oBgHgl3EQfQAAH/content/tmp_files/2301.02185v1.pdf.txt +1146 -0
  11. BtE0T4oBgHgl3EQfQAAH/content/tmp_files/load_file.txt +0 -0
  12. GdAyT4oBgHgl3EQfSvfs/content/tmp_files/2301.00094v1.pdf.txt +1049 -0
  13. GdAyT4oBgHgl3EQfSvfs/content/tmp_files/load_file.txt +0 -0
  14. GdE1T4oBgHgl3EQfrAWz/vector_store/index.faiss +3 -0
  15. JdAyT4oBgHgl3EQffviy/content/2301.00347v1.pdf +3 -0
  16. OtFKT4oBgHgl3EQffy6V/vector_store/index.pkl +3 -0
  17. OtFOT4oBgHgl3EQf3zQR/content/tmp_files/2301.12947v1.pdf.txt +1348 -0
  18. OtFOT4oBgHgl3EQf3zQR/content/tmp_files/load_file.txt +0 -0
  19. UNAyT4oBgHgl3EQfVvd8/content/tmp_files/2301.00149v1.pdf.txt +2351 -0
  20. UNAyT4oBgHgl3EQfVvd8/content/tmp_files/load_file.txt +0 -0
  21. UNE5T4oBgHgl3EQfAw5l/content/tmp_files/2301.05381v1.pdf.txt +1938 -0
  22. UNE5T4oBgHgl3EQfAw5l/content/tmp_files/load_file.txt +0 -0
  23. UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf +0 -0
  24. UtAzT4oBgHgl3EQf0_7l/content/tmp_files/2301.01794v1.pdf.txt +1452 -0
  25. UtAzT4oBgHgl3EQf0_7l/content/tmp_files/load_file.txt +194 -0
  26. W9E1T4oBgHgl3EQfvgXD/content/tmp_files/2301.03401v1.pdf.txt +1467 -0
  27. W9E1T4oBgHgl3EQfvgXD/content/tmp_files/load_file.txt +0 -0
  28. XNAyT4oBgHgl3EQf9PpX/vector_store/index.pkl +3 -0
  29. XNE0T4oBgHgl3EQf3QLX/content/tmp_files/2301.02724v1.pdf.txt +1914 -0
  30. XNE0T4oBgHgl3EQf3QLX/content/tmp_files/load_file.txt +0 -0
  31. Z9A0T4oBgHgl3EQfF_-o/content/tmp_files/2301.02041v1.pdf.txt +383 -0
  32. Z9A0T4oBgHgl3EQfF_-o/content/tmp_files/load_file.txt +189 -0
  33. _9E1T4oBgHgl3EQfDAK2/vector_store/index.faiss +3 -0
  34. aNE4T4oBgHgl3EQfnw2m/content/tmp_files/2301.05179v1.pdf.txt +574 -0
  35. aNE4T4oBgHgl3EQfnw2m/content/tmp_files/load_file.txt +289 -0
  36. cNFAT4oBgHgl3EQfYB2G/content/tmp_files/2301.08537v1.pdf.txt +1056 -0
  37. cNFAT4oBgHgl3EQfYB2G/content/tmp_files/load_file.txt +0 -0
  38. ctAyT4oBgHgl3EQfjPgK/content/tmp_files/2301.00409v1.pdf.txt +694 -0
  39. ctAyT4oBgHgl3EQfjPgK/content/tmp_files/load_file.txt +0 -0
  40. dtAzT4oBgHgl3EQf3f5y/content/tmp_files/2301.01830v1.pdf.txt +590 -0
  41. dtAzT4oBgHgl3EQf3f5y/content/tmp_files/load_file.txt +0 -0
  42. ftAzT4oBgHgl3EQfof1Y/content/tmp_files/2301.01597v1.pdf.txt +2796 -0
  43. ftAzT4oBgHgl3EQfof1Y/content/tmp_files/load_file.txt +0 -0
  44. hdA0T4oBgHgl3EQfIP8h/content/tmp_files/2301.02071v1.pdf.txt +1404 -0
  45. hdA0T4oBgHgl3EQfIP8h/content/tmp_files/load_file.txt +0 -0
  46. htA0T4oBgHgl3EQfIP9i/vector_store/index.faiss +3 -0
  47. idAzT4oBgHgl3EQfbPxy/content/tmp_files/2301.01382v1.pdf.txt +840 -0
  48. idAzT4oBgHgl3EQfbPxy/content/tmp_files/load_file.txt +350 -0
  49. itAzT4oBgHgl3EQfbPyr/content/tmp_files/2301.01383v1.pdf.txt +1686 -0
  50. itAzT4oBgHgl3EQfbPyr/content/tmp_files/load_file.txt +0 -0
.gitattributes CHANGED
@@ -165,3 +165,8 @@ d9E4T4oBgHgl3EQfpw0m/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 165   BtE5T4oBgHgl3EQfTQ-D/content/2301.05535v1.pdf filter=lfs diff=lfs merge=lfs -text
 166   6dAyT4oBgHgl3EQfcvcQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 167   OtFKT4oBgHgl3EQffy6V/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 168 + _9E1T4oBgHgl3EQfDAK2/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 169 + GdE1T4oBgHgl3EQfrAWz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 170 + htA0T4oBgHgl3EQfIP9i/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 171 + JdAyT4oBgHgl3EQffviy/content/2301.00347v1.pdf filter=lfs diff=lfs merge=lfs -text
 172 + p9FLT4oBgHgl3EQfiS8Z/content/2301.12106v1.pdf filter=lfs diff=lfs merge=lfs -text
4tFIT4oBgHgl3EQf7Ctv/content/tmp_files/2301.11396v1.pdf.txt ADDED
@@ -0,0 +1,1650 @@
CLASS-INCREMENTAL LEARNING WITH REPETITION

Hamed Hemati1, Andrea Cossu2, Antonio Carta3, Julio Hurtado3, Lorenzo Pellegrini4, Davide Bacciu3, Vincenzo Lomonaco3, Damian Borth1

1 University of St. Gallen, Switzerland
2 Scuola Normale Superiore, Italy
3 University of Pisa, Italy
4 University of Bologna, Italy

ABSTRACT

Real-world data streams naturally include the repetition of previous concepts. From a Continual Learning (CL) perspective, repetition is a property of the environment and, unlike replay, cannot be controlled by the user. Nowadays, Class-Incremental scenarios represent the leading test-bed for assessing and comparing CL strategies. This family of scenarios is very easy to use, but it never allows revisiting previously seen classes, thus completely disregarding the role of repetition. We focus on the family of Class-Incremental with Repetition (CIR) scenarios, where repetition is embedded in the definition of the stream. We propose two stochastic scenario generators that produce a wide range of CIR scenarios starting from a single dataset and a few control parameters. We conduct the first comprehensive evaluation of repetition in CL by studying the behavior of existing CL strategies under different CIR scenarios. We then present a novel replay strategy that exploits repetition and counteracts the natural imbalance present in the stream. On both CIFAR100 and TinyImageNet, our strategy outperforms other replay approaches, which are not designed for environments with repetition.

1 INTRODUCTION
Continual Learning (CL) requires a model to learn new information from a stream of experiences presented over time, without forgetting previous knowledge (Parisi et al., 2019; Lesort et al., 2020). The nature and characteristics of the data stream can vary a lot depending on the real-world environment and target application. Class-Incremental (CI) scenarios (Rebuffi et al., 2017) are the most popular ones in CL. CI requires the model to solve a classification problem where new classes appear over time. Importantly, when a set of new classes appears, the previous ones are never seen again. However, the model still needs to correctly predict them at test time. Conversely, in a Domain-Incremental (DI) scenario (van de Ven & Tolias, 2019) the model sees all the classes at the beginning and continues to observe new instances of those classes over time.

The CI and DI scenarios have been very helpful to promote and drive CL research in the last few years. However, they strongly constrain the properties of the data stream in a way that is sometimes considered unrealistic or very limiting (Cossu et al., 2021). Recently, the idea of Class-Incremental with Repetition (CIR) scenarios has started to gather attention in CL (Cossu et al., 2021). CIR scenarios are arguably more flexible in the definition of the stream, since they allow both the introduction of new classes and the repetition of previously seen classes. Crucially, repetition is a property of the environment and cannot be controlled by the CL agent. This is very different from Replay strategies (Hayes et al., 2021), where the repetition of previous concepts is heavily structured and can be tuned at will.

CIR defines a family of CL scenarios which ranges from CI (new classes only, without repetition) to DI (full repetition of all seen classes). Although appealing, currently there exists neither a quantitative analysis nor an empirical evaluation of CL strategies learning in CIR scenarios, mainly because it is not obvious how to build a stream with repetition, given the large number of variables involved. How should repetition be managed over time? How do we decide what to repeat? What data should we use? In this paper, we provide two generators for CIR that, starting from a single dataset, allow customized streams to be built by setting only a few parameters. The generators are as easy to use as CI or DI ones.

We leveraged our generators to run an extensive empirical evaluation of the behavior of CL strategies in CIR scenarios. We found that knowledge accumulation happens naturally in streams with repetition. Even naive fine-tuning, subject to complete forgetting in CI scenarios, is able to accumulate knowledge for classes that are not always present in an experience. We observed that Replay strategies still provide an advantage in terms of final accuracy, even though they are not crucial to avoid catastrophic forgetting. On one side, distillation-based strategies like LwF (Li & Hoiem, 2018) are competitive in streams with a moderate amount of repetition. On the other side, existing Replay strategies are not specifically designed for CIR streams. We propose a novel Replay approach, called Frequency-Aware Replay (ER-FA), designed for streams with unbalanced repetition (a few classes appear rarely, the others very frequently). ER-FA surpasses other Replay variants by a large margin on infrequent classes and does not lose performance on frequent classes. This leads to a moderate gain in final accuracy, with much better robustness and a reduced variance across all classes. Our main contributions are:

1. The design of two CIR generators, able to create streams with repetition by setting only a few control parameters. We built both generators with Avalanche (Lomonaco et al., 2021) and we will make them publicly available to foster future research. The generators are general enough to fit any classification dataset and are fully integrated with the Avalanche pipeline to run CL experiments.

2. An extensive evaluation of the properties of CIR streams and the performance of CL strategies. We study knowledge accumulation and show that Replay, although still effective, is not crucial for the mitigation of catastrophic forgetting. Some approaches (e.g., LwF) look more promising than others in CIR scenarios. We consolidate our results with an analysis of the CL models over time through Centered Kernel Alignment (CKA) (Kornblith et al., 2019) and weight analysis.

3. A novel Replay variant, ER-FA, designed around the properties of CIR scenarios. ER-FA surpasses other Replay strategies in unbalanced streams and provides a more robust performance on infrequent classes without losing accuracy on the frequent ones.

2 CLASS-INCREMENTAL LEARNING WITH REPETITION GENERATORS
Figure 1: Illustration of the scenario types (CI, DI, CIR) that can be generated with episodic partial access to a finite set of concepts. The shape colors indicate whether instances are new in each episode or can be a mixture of old and new instances.
CL requires a model to learn from a stream of N experiences S = {e1, e2, ..., eN}, where each experience ei brings a dataset of examples Dei = {Xi, Yi}. Many CL scenarios, like CI or DI, are generated from a fixed dataset D = {(x, y); x ∈ X, y ∈ Y}, where x is the input example, y is the target and Y = {1, ..., C} is the label space (closed-world assumption). Depending on how classes from the entire dataset D are shown or revisited in the stream, this configuration can lead to CI, CIR or DI scenarios (Figure 1). In Table 1, we formally present and compare the properties of the three scenario types.

In CIR, streams with repetition are characterized by multiple occurrences of the same class over time. To study this scenario, we propose two stream generators designed to create a stream from a finite dataset: the Slot-Based Generator (Gslot) and the Sampling-Based Generator (Gsamp). Gslot generates streams by enforcing constraints on the number of occurrences of classes in the stream using only two parameters. Gslot does not repeat already observed samples, therefore the stream length is limited by the number of classes. However, it guarantees that all samples in the dataset will be observed exactly once during the lifetime of the model. Instead, Gsamp generates streams according to several parametric distributions that control the stream properties. It can generate arbitrarily long streams in which old instances can also re-appear with some probability.

Property             | CI                 | DI                 | CIR
---------------------|--------------------|--------------------|--------------------------
Instance Repetition* | Xi ∩ Xj = Ø        | Xi ∩ Xj = Ø        | |Xi ∩ Xj| ≥ 0
Domain Coverage      | ∪i Xi = X          | ∪i Xi = X          | ∪i Xi ∈ P(X) \ Ø
Concept Repetition*  | Yi ∩ Yj = Ø        | Y1 = ... = YN = Y  | |Yi ∩ Yj| ≥ 0
Codomain Coverage    | ∪i Yi = Y          | ∪i Yi = Y          | ∪i Yi ∈ P(Y) \ Ø

Table 1: Comparison of scenario properties in CI, DI and CIR. Unions run over i = 1, ..., N. P(A) and |A| represent the power set and the cardinality of set A. *: ∀ 1 ≤ i, j ≤ N, i ≠ j.
2.1 SLOT-BASED GENERATOR

The Slot-Based Generator Gslot allows careful control of the class repetitions in the generated stream through a single parameter K. Gslot takes as input a dataset D, the total number of experiences N and the number of slots per experience K. It returns a CIR stream composed of N experiences, where each of the K slots in each experience is filled with samples coming from a single class.

Gslot constrains the slot-class association such that all the samples present in the dataset are seen exactly once in the stream. Therefore, Gslot considers repetition at the level of concepts. To implement this logic, Gslot first partitions all the samples in the dataset into the target number of slots. Then, it randomly assigns K slots per experience, without replacement. At the end, the N mod K remaining blocks are assigned to the first experience, such that the rest of the stream is not affected by a variable number of slots.

Figure 2: Illustration of how various scenarios can be generated by Gslot by changing K and N. The red area under the blue curve represents invalid scenarios.

The Slot-Based Generator is useful to study the transition from CI scenarios to DI scenarios, obtained by simply changing the parameter K (Figure 2). For example, let us consider a dataset with 10 classes such as MNIST. By choosing N = 5 and K = 2 we obtain the popular Split-MNIST, a CI scenario with no repetition and 2 classes per experience. Conversely, by setting N = 5 and K = 10 we obtain a DI stream where all 10 classes appear in each experience with new unseen samples. More generally, given a dataset with C classes, we obtain a CI scenario by setting K = C / N (N must divide C), and a DI scenario by setting K = C. In Appendix B we illustrate the overall steps of stream generation (Figure 12) and provide a step-by-step formal definition of Gslot (Algorithm 2).
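As a concrete illustration of this procedure, the following minimal Python sketch (our illustration with hypothetical names, not the Avalanche-based implementation) builds such a stream, assuming for simplicity that the number of classes divides N × K and dropping division remainders:

import random
from collections import defaultdict

def slot_based_stream(dataset, n_experiences, k, seed=0):
    # Sketch of the slot logic in Sec. 2.1: partition each class into
    # single-class slots, then deal K slots (without replacement) to each
    # experience so that every sample appears exactly once in the stream.
    rng = random.Random(seed)
    by_class = defaultdict(list)
    for x, y in dataset:
        by_class[y].append((x, y))

    classes = sorted(by_class)
    slots_per_class = (n_experiences * k) // len(classes)
    slots = []
    for c in classes:
        samples = by_class[c]
        rng.shuffle(samples)
        # Split this class into `slots_per_class` roughly equal slots
        # (remainders are dropped for brevity).
        size = len(samples) // slots_per_class
        slots += [samples[i * size:(i + 1) * size] for i in range(slots_per_class)]

    rng.shuffle(slots)  # random slot-to-experience assignment, no replacement
    return [sum(slots[i * k:(i + 1) * k], []) for i in range(n_experiences)]

With a 10-class dataset, slot_based_stream(data, 5, 2) reproduces a Split-MNIST-like CI stream, while slot_based_stream(data, 5, 10) yields the DI-like end of the spectrum described above.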
2.2 SAMPLING-BASED GENERATOR

The Sampling-Based Generator (Gsamp) generates arbitrarily long streams and controls repetition via probability distributions. The stream generator allows control over the first occurrence of new classes and the amount of repetition of old classes. Unlike Gslot, it can generate infinite and even unbalanced streams.

Gsamp parameters:

• N: Stream length, i.e., the number of experiences in the stream.
• S: Experience size, which defines the number of samples in each experience.
• Pf(S): Probability distribution over the stream S used for sampling the experience ID of the first occurrence of each class.
• Pr: List of repetition probabilities for the dataset classes.
Figure 3: Schematic view of the Gsamp generator. Each concept is shown with a different color.
Note that Pf is a probability mass function over the stream S, which means it sums to 1.0 and determines in which parts of the stream it is more likely to observe new classes for the first time. However, the probabilities {p1, p2, ..., pC} in Pr are independent, and each probability value 0.0 ≤ pi ≤ 1.0 indicates how likely it is for each class to repeat after its first occurrence.

For each experience, Gsamp samples instances from the original dataset D according to a two-step process. First, Gsamp defines a C × N binary matrix T, called the Occurrence Matrix, which determines which classes can appear in each experience. Then, for each experience ei, 1 ≤ i ≤ N, we use the i-th column of T to sample data points for that experience. The generator uses the inputs N, Pf(S) and Pr to generate T. It first initializes T as a zero C × N matrix. Then, for each class c in the dataset, it uses Pf(S) to sample the index of the experience in which class c appears for the first time. Different probability distributions can be used to either spread the first occurrences along the stream or concentrate them at the beginning, which allows good flexibility in the stream design. After sampling the first occurrences, the classes are repeated based on the Pr probability values to finalize the matrix T. In the simplest case, Pr can be fixed to the same value for all classes to generate a balanced stream.

Once the matrix T is constructed, a random sampler is used to sample patterns for each experience. Since each experience may contain an arbitrary number of classes, another control parameter that could be added here is the fraction of samples per class within the experience size S. For simplicity we keep the fractions equally fixed, and thus the number of data points sampled from each class in experience ei is ⌊S / |Ci|⌋, where |Ci| indicates the number of classes present in ei. Since the sampler is stochastic, each time we sample from a class, both new and old patterns can appear for that class. Given a large enough stream length N, the final stream will cover the whole dataset with a measurable amount of average repetition. In Figure 3 we show the schematic of the generator Gsamp, and a minimal sketch of the process is given below. We provide the pseudo-code for Gsamp in Appendix C (Algorithm 2).
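As an illustration of this two-step process, the sketch below (hypothetical names; the geometric-like Pf and the constant Pr = 0.2 mirror the configurations used later in Section 4.2) builds T and derives the per-class sample count for one experience:

import numpy as np

def build_occurrence_matrix(n_classes, n_experiences, p_first, p_r, rng):
    # T[c, i] = 1 iff class c is allowed to appear in experience i (Sec. 2.2).
    T = np.zeros((n_classes, n_experiences), dtype=int)
    for c in range(n_classes):
        first = rng.choice(n_experiences, p=p_first)   # first occurrence ~ Pf(S)
        T[c, first] = 1
        later = np.arange(first + 1, n_experiences)
        T[c, later] = rng.random(later.size) < p_r[c]  # repeat with prob. Pr[c]
    return T

rng = np.random.default_rng(0)
C, N, S = 10, 50, 600
p_first = 0.2 * (1 - 0.2) ** np.arange(N)   # geometric-like Pf, mass at the start
p_first /= p_first.sum()                    # normalize to a valid pmf over the stream
p_r = np.full(C, 0.2)                       # balanced stream: same repetition prob.
T = build_occurrence_matrix(C, N, p_first, p_r, rng)

present = np.flatnonzero(T[:, 0])           # classes present in the first experience
per_class = S // max(len(present), 1)       # floor(S / |C_i|) samples per class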
Although we assume a fixed number of instances per class in D, Gsamp can easily be extended to settings where the number of instances grows over time. Moreover, the sampler can be designed arbitrarily depending on the type of stochasticity, e.g., irregular or cyclic.
3 FREQUENCY-AWARE REPLAY

Experience Replay (ER) is the most popular CL strategy due to its simplicity of use and high performance in class-incremental scenarios. The storage policy, which determines which samples to keep in a limited buffer, is the major component of ER methods. Class-Balanced (CB) and Reservoir Sampling (RS) (Vitter, 1985) are the most popular storage policies in ER methods. CB keeps a fixed quota for each class, while RS samples randomly from the stream, which leads to the class frequency in the buffer being equal to the frequency in the stream. CB and RS are great choices for balanced streams such as class-incremental scenarios, where the number of samples per class is the same over the whole stream. However, as in most real-world scenarios, CIR scenarios are naturally unbalanced, and different classes may have completely different repetition frequencies. Accordingly, CB and RS storage policies may suffer a big accuracy drop on the infrequent classes of an unbalanced stream. For example, in highly unbalanced streams, RS will store an unbalanced buffer replicating the original distribution of the stream, which is sub-optimal because the less frequent classes will require more repetition to prevent forgetting, while the frequent classes will be repeated naturally via the stream occurrences.

Figure 4: Ratio of buffer slots for infrequent classes for three random seeds, under the FA (ours), CB and RS storage policies.

We propose the Frequency-Aware (FA) storage policy, which addresses the imbalance issue in CIR streams by online adjustment of the buffer slots in accordance with the amount of repetition for each class. Given a buffer B with a maximum size of M, a list of previously observed classes P initialized as P = {} with a corresponding list O indicating the number of observations per class c in C, and a dataset Di from experience ei, the algorithm first checks the present classes Pi in Di and adds them to P (P ← P ∪ Pi). Then, for each class c in Pi it increments the number of observations O[c] by 1 if the class was previously seen, otherwise it initializes O[c] = 1. After updating the number of observations, FA computes the buffer quota Q for all observed classes by inverting the number of observations (Q = [1/O[c] ∀c ∈ C]) and normalizes it. This way, the algorithm offers the less frequent classes a larger quota. Finally, a procedure ensures the buffer is used to its maximum capacity by filling unused slots with samples from more frequent classes, sorted by their observation counts. This is a crucial step, since an infrequent class which is not present in ei may be assigned a larger quota than its current number of samples in B, and the buffer would otherwise remain partially empty. In Figure 4 we show how our method assigns a higher ratio of samples to infrequent classes to overcome the imbalance in the stream. For further analysis and the pseudo-code of the FA policy refer to Appendix E. We present examples of unbalanced scenarios in Appendix D.
4 EMPIRICAL EVALUATION

We study CIR scenarios by leveraging our generators Gsamp and Gslot. First, using Gslot, we provide quantitative results about forgetting in CL strategies when transitioning from CI to DI scenarios (Sec. 4.1). Then, using Gsamp, we focus on long streams with 500 experiences and study the performance of Replay and Naive (Sec. 4.2). The long streams give us the opportunity to study knowledge accumulation over time in the presence of repetition. We also provide an intuitive interpretation of the model dynamics over long streams (Sec. 4.3). Finally, we show that our Frequency-Aware Replay is able to exploit the repetitions present in the stream and to surpass the performance of other replay approaches not specifically designed for CIR scenarios (Sec. 4.4).

The experiments were conducted on the CIFAR-100 (Krizhevsky et al., 2009) and Tiny-ImageNet (LeCun et al., 1998) datasets with the ResNet-18 model. For Gslot, we run experiments with the Naive (incremental fine-tuning), LwF (Li & Hoiem, 2018), EWC (Kirkpatrick et al., 2017), Experience Replay with reservoir sampling (ER-RS) (Kim et al., 2020) and A-GEM (Chaudhry et al., 2018) strategies. For Gsamp, we run experiments with the Naive and ER (CB/RS/FA) strategies. In the replay strategies, we set the default buffer size to 2000 for CIFAR-100 and to 4000 for Tiny-ImageNet. We evaluate all strategies on the Average Test Accuracy (TA).
4.1 TRANSITION FROM CLASS-INCREMENTAL TO DOMAIN-INCREMENTAL

DI and CI scenarios are heavily studied in the CL literature. However, little is known about what happens to the performance of popular CL strategies when gradually transitioning from one scenario to the other. By changing the value of K in Gslot, we provide a quantitative analysis of such behavior in CIR scenarios. Figure 5 shows the Average Test Accuracy over all classes for different CL strategies when transitioning from CI (left-most point of each plot) to DI (right-most point of each plot).

Replay is one of the most effective strategies in CI scenarios. As expected, in CIR scenarios the advantage provided by ER-RS with respect to other CL strategies diminishes as the amount of repetition increases. However, in order for the other strategies to match the performance of ER-RS, the environment needs to provide a large amount of repetition.

LwF guarantees a consistent boost in performance, both on CIFAR-100 and Tiny-ImageNet. In particular, and quite surprisingly, on Tiny-ImageNet LwF is able to quickly close the gap with ER-RS and even surpass it as the amount of repetition increases. The positive interplay between distillation and repetition provides an effective way to mitigate forgetting in CIR scenarios, without the need to explicitly store previous samples in an external memory. EWC showed different sensitivity to the regularization hyper-parameter λ. We experimented with λ = 0.1, 1, 10, 100. While on MNIST we did not see any difference in performance, on CIFAR-100 and Tiny-ImageNet large values of λ lead to a dramatic decrease, dropping as low as Naive. We found 0.1 to be the best value on both CIFAR-100 and Tiny-ImageNet. This configuration only provides a low amount of regularization. Overall, the role played by the natural repetition already guarantees a sufficient stability of the model, which is additionally boosted only in the case of LwF.

Figure 5: Average Test Accuracy for different values of K in CIR scenarios generated with Gslot, on MNIST, CIFAR-100 and Tiny-ImageNet. Class-Incremental scenarios are represented by the left-most point of each plot, Domain-Incremental scenarios by the right-most point. Results averaged over 3 seeds.

Figure 6: Accuracy of a particular class over the stream. The target class is either present or absent in the experiences indicated by the blue and orange points, respectively.
4.2 IMPACT OF REPETITION IN LONG STREAMS

We investigate the impact of repetition in long streams (N = 500) generated with Gsamp. For the long-stream experiments we also report the missing-classes accuracy (MCA) and the seen-classes accuracy (SCA). MCA measures the accuracy over the classes that were seen before but are missing in the current experience, and SCA measures the accuracy over all classes seen up to the current experience.
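For clarity, the two metrics can be computed from per-class accuracies as in the following sketch (hypothetical helper; it assumes per-class test accuracies after the current experience are already available):

def mca_sca(per_class_acc, seen_classes, present_classes):
    # per_class_acc: dict class_id -> test accuracy after the current experience.
    # seen_classes: classes observed in any experience so far.
    # present_classes: classes contained in the current experience.
    missing = seen_classes - present_classes
    mca = sum(per_class_acc[c] for c in missing) / max(len(missing), 1)
    sca = sum(per_class_acc[c] for c in seen_classes) / max(len(seen_classes), 1)
    return mca, sca

# e.g. classes {0, 1, 2} seen so far, only {2} trained in this experience:
print(mca_sca({0: 0.4, 1: 0.5, 2: 0.9}, {0, 1, 2}, {2}))  # -> (0.45, 0.6)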
Missing Class Accuracy Increases Over Time. In CI scenarios, a Naive strategy catastrophically forgets each class as soon as it starts learning new classes. Surprisingly, we found that in CIR scenarios there is knowledge accumulation over time for all the classes. Figure 6 shows the accuracy of a single class over time, highlighting whether the class is present or not in the current experience. At the beginning of the stream, missing classes are completely forgotten, which can be noticed by the instant drop of the accuracy to zero. However, over time the model accumulates knowledge and the training process stabilizes. As a result, the accuracy of missing classes tends to increase over time, suggesting that the network becomes more resistant to forgetting. Notice that this is an example of a continual learning property that is completely ignored when testing on CI scenarios. This finding prompts the question, "What is happening to allow knowledge accumulation even for Naive fine-tuning?". We investigate this question by analyzing the model's accuracy over time and the properties of the learned model in the next experiments.

Accuracy Gap Between Naive and Replay Decreases Over Time. To study the impact of long streams with repetition, we monitor the accuracy gap between ER and Naive fine-tuning by comparing their accuracy after each experience. For the scenario configuration, we set Pf(S) as a Geometric distribution with a coefficient of 0.01 and fix the probability of repetition Pr at 0.2 for all classes. For more details and illustrations of the distribution types refer to Appendix C. In such scenarios, the majority of classes occur for the first time in the first quarter of the stream and then repeat with a constant probability of 0.2, which makes them appropriate for our experiments since all classes are observed before the middle of the stream and the repetition probability is low enough. As can be seen in Figure 7, while the accuracy of ER saturates over time, the accuracy of Naive increases, closing the gap between the two strategies from around 25% in experience 100 to 7% in experience 500. This supports our hypothesis that a neural network's ability to consolidate knowledge is significantly influenced by "natural" repetition in the environment.

Figure 7: Average test accuracy, average seen-class accuracy and average missing-class accuracy for long streams with 500 experiences (ER-CB vs. Naive).
The Role of Repetition. The amount of repetition is one of the key aspects of a CIR scenario. To find out how strategies perform under different repetition probabilities, we consider a setting where all components of a scenario are fixed except for Pr. For this experiment, we set Pf(S) as a geometric distribution with p = 0.2 and let Pr vary. In Figure 8 we show the seen-class accuracy (SCA) for the Naive and ER-CB strategies on CIFAR-100. It is clear from the plots that the model's rate of convergence can be significantly affected by the amount of repetition in the scenario. Although it may seem obvious that higher repetition leads to less forgetting, it is not intuitive to what extent different strategies gain from the environment's repetition. While the Naive strategy gains a lot from increased repetition, the replay strategy saturates after some point for higher repetitions, and the gaps between different repetition values close.

Figure 8: Retained accuracy of ER-CB and Naive for different values of p in Pr.
4.3 MODEL SIMILARITY AND WEIGHT SPACE ANALYSIS

Weight Interpolation. Based on the "gradual loss drop" observation on missing classes, we study how the loss surface changes over time if we perturb the weights. We interpolate between the model weights of two consecutive checkpoints, taken with an interval of 10 experiences, in various segments of the stream. Assuming that w*_t and w*_{t+10} are the solutions obtained for experiences t and t + 10 respectively, we generate eight in-between models w_k = α · w*_t + (1 − α) · w*_{t+10} by increasing α from zero to one, and then compute the accuracy of w_k on the data of experience t. We show the interpolation accuracy for various pairs of experiences in different segments of the stream for the Naive strategy in Figure 9 (left). In the beginning of the stream, the accuracy on experience t in each pair drops significantly, while we observe a milder loss drop towards the end of the stream. The findings suggest that, towards the end of the stream, even a relatively big perturbation does not have a large negative effect on the model's accuracy, and the optimal solutions of consecutive experiences are connected by a linear low-loss path.
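A sketch of this interpolation analysis (our illustration in PyTorch; it assumes both checkpoints share the same architecture):

import copy
import torch

@torch.no_grad()
def interpolation_accuracies(model_t, model_t10, eval_loader, alphas):
    # Accuracy of w_k = alpha * w_t + (1 - alpha) * w_{t+10} on the data of
    # experience t, for each alpha (Sec. 4.3).
    probe = copy.deepcopy(model_t).eval()
    sd_t, sd_t10 = model_t.state_dict(), model_t10.state_dict()
    accs = []
    for alpha in alphas:
        # Integer buffers (e.g. BatchNorm counters) are cast back on load.
        probe.load_state_dict({k: alpha * sd_t[k] + (1 - alpha) * sd_t10[k]
                               for k in sd_t})
        correct = total = 0
        for x, y in eval_loader:
            correct += (probe(x).argmax(dim=1) == y).sum().item()
            total += y.numel()
        accs.append(correct / total)
    return accs

# Eight in-between models:
# interpolation_accuracies(m1, m2, loader, torch.linspace(0, 1, 10)[1:-1])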
Weight Changes. Another approach to analyzing the gradual drop of accuracy is to dissect how much, when, and where the weight changes occur. As shown in Figure 9 (right), within the first experiences there is a significant difference for blocks 0, 1, and 2. This difference then stalls, showing that as training proceeds over the experiences, the weights of these blocks stabilize. On the other hand, blocks 3 and 4 show a linear increase in the difference with the number of experiences. An explanation for this phenomenon is that the first layers of the model capture knowledge that can be useful for several classes (more general), so it is unnecessary to change them after several experiences. On the other hand, the last blocks are the ones that memorize or learn more specific patterns, so they adapt to each experience.
Figure 9: (left) Interpolation accuracy over the linear path between checkpoint pairs from different segments of the stream. (right) Weight difference of each block over the experiences. The difference used in (right) is calculated as Dj = (1 / |θ0|) Σi |(θ0,i − θj,i) / ∥θ0,i∥2|, where the weights of experience j are compared with the initialization θ0 for each block i.
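A sketch of this per-block difference is given below (our reading of the formula: since the caption is ambiguous about the norm, we take ∥θ0,i∥2 as the L2 norm of the corresponding initialization tensor; block grouping by parameter-name prefix is also an assumption):

import torch

def block_difference(theta_0, theta_j, block_prefix):
    # Average over the block's weights of |theta_0 - theta_j|, scaled by the
    # L2 norm of the corresponding initialization tensor.
    total, count = 0.0, 0
    for name, w0 in theta_0.items():
        if not name.startswith(block_prefix) or not w0.is_floating_point():
            continue
        wj = theta_j[name]
        total += ((w0 - wj).abs() / w0.norm(p=2)).sum().item()
        count += w0.numel()
    return total / max(count, 1)

# e.g. with torchvision ResNet-18 state dicts: block_difference(sd0, sdj, "layer3")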
CKA Analysis. Finally, we show the CKA (Kornblith et al., 2019) of the model at the beginning, middle and end of the stream, with an interval difference of 50 experiences. As shown in the visualizations in Figure 10, the longer the model is trained on more experiences, the less significant the changes in the representations become, especially for the final layers. We can see that the diagonal of the CKA becomes sharper propagating forward with more experiences. This indicates that although the model is trained on different subsets of classes in each experience, the representations change less after some point in the stream.
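For reference, linear CKA between two activation matrices can be computed with the standard formula from Kornblith et al. (2019) (a minimal sketch, not the authors' evaluation code):

import torch

def linear_cka(X, Y):
    # Linear CKA between activations X (n x d1) and Y (n x d2) for the same
    # n examples: ||Y^T X||_F^2 / (||X^T X||_F * ||Y^T Y||_F), after centering.
    X = X - X.mean(dim=0, keepdim=True)
    Y = Y - Y.mean(dim=0, keepdim=True)
    hsic = (Y.T @ X).norm(p="fro") ** 2
    denom = (X.T @ X).norm(p="fro") * (Y.T @ Y).norm(p="fro")
    return (hsic / denom).item()

# Identical representations give 1.0, e.g. linear_cka(Z, Z) == 1.0 (up to fp error).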
Figure 10: CKA of the model in different parts of the stream (checkpoint pairs at experiences 5 vs. 15, 25 vs. 35, 55 vs. 65, and 190 vs. 200).
4.4 FREQUENCY-AWARE REPLAY IN UNBALANCED SCENARIOS

We conduct experiments in bi-modal unbalanced scenarios where classes can have a high frequency of 1.0 or a low frequency of 0.1. We use a fraction factor that determines the amount of infrequent classes in the scenario; e.g., Fraction = 0.3 means that 30% of the classes are infrequent. In Table 2 we compare ER-FA with the Naive, ER-RS and ER-CB strategies. The numbers show the MCA and average Test Accuracy (TA) metrics for each strategy at the end of the stream, averaged over three runs. Our strategy outperforms all other strategies in almost all settings on both the CIFAR-100 and TinyImageNet datasets in terms of TA, and significantly outperforms the other methods in terms of MCA (in the last experience). Moreover, in Figure 11 we illustrate the accuracy of infrequent classes in the CIFAR-100 experiments for Fraction = 0.3, where ER-FA achieves considerably higher accuracy over the whole stream by assigning a larger quota to infrequent classes, without losing performance on frequent classes (refer to Appendix G for further illustrations).

Figure 11: Accuracy of infrequent classes for ER-FA, ER-CB, ER-RS and Naive.
DS     Strategy |      Fraction = 0.1      |      Fraction = 0.3      |      Fraction = 0.5
                |    MCA          TA       |    MCA          TA       |    MCA          TA
C-100  Naive    |  5.0 ± 0.7   58.0 ± 0.1  |  7.3 ± 2.2   49.0 ± 0.8  |  8.0 ± 2.0   40.8 ± 1.4
       ER-RS    | 11.4 ± 0.9   57.7 ± 0.7  | 16.7 ± 3.6   51.1 ± 0.4  | 20.5 ± 1.9   45.6 ± 0.8
       ER-CB    | 30.9 ± 2.7   59.5 ± 0.1  | 34.5 ± 1.7   55.3 ± 0.1  | 35.7 ± 0.5   52.0 ± 1.5
       ER-FA    | 52.2 ± 1.1   60.8 ± 0.3  | 44.7 ± 1.5   57.8 ± 0.4  | 40.9 ± 1.2   54.2 ± 1.2
TIN    Naive    |  2.0 ± 0.8   33.5 ± 0.4  |  2.0 ± 0.1   29.1 ± 0.1  |  2.0 ± 0.1   24.0 ± 0.4
       ER-RS    |  3.7 ± 0.6   31.8 ± 1.2  |  4.4 ± 0.7   28.1 ± 0.2  |  6.0 ± 0.1   24.0 ± 0.1
       ER-CB    | 10.4 ± 0.2   32.2 ± 0.7  | 10.0 ± 1.0   28.8 ± 0.2  | 11.0 ± 0.2   26.0 ± 0.3
       ER-FA    | 22.0 ± 1.0   33.0 ± 0.9  | 15.3 ± 1.0   30.4 ± 0.1  | 13.6 ± 0.1   27.0 ± 0.1

Table 2: Unbalanced scenario results for the CIFAR-100 (C-100) and TinyImageNet (TIN) datasets. "Fraction" refers to the fraction of infrequent classes, which have a repetition probability of only 10%.

5 RELATED WORK

Current CL methods mainly focus on two types of benchmarks, namely Multi Task (MT) and Single Incremental Task (SIT) (Maltoni & Lomonaco, 2019). MT divides training data into distinct tasks and labels them during training and inference. SIT splits a single task into a sequence of unlabeled experiences. SIT can be further divided into Domain-Incremental (DI), where all classes are seen in each experience, and Class-Incremental (CI), where each experience contains only new (unseen) classes (van de Ven & Tolias, 2019). Both DI and CI are extreme cases and are unlikely to hold in real-world environments (Cossu et al., 2021). In a more realistic setting, the role of natural repetition in CL scenarios was studied in the context of the New Instances and Classes (NIC) scenario (Lomonaco et al., 2020) and the CRIB benchmark (Stojanov et al., 2019). NIC mainly focuses on small experiences composed of images of the same object, and repetitions in CRIB are adapted to a certain dataset and protocol. The Class-Incremental with Repetition (CIR) scenario was initially formalized in Cossu et al. (2021); however, that work lacks a systematic study of CIR scenarios, as the wide range of CIR scenarios makes them difficult to study.

To counter the lack of repetition in CI, replay has been extensively used as a CL strategy (Rebuffi et al., 2017; Lopez-Paz & Ranzato, 2017; Chaudhry et al., 2018; Wu et al., 2019; Castro et al., 2018; Belouadah & Popescu, 2019; Kim et al., 2020; Douillard et al., 2020). In such methods, natural repetition is artificially simulated by storing past data in an external memory and replaying it alongside the scenario stream data. Repetition reduces catastrophic forgetting through implicit regularization of the model's weights (Hayes et al., 2021). In CI benchmarks, replay seems to be the only working strategy (van de Ven et al., 2020). In other words, replay seems to be a necessity when no natural repetition happens.

Although replay can be seen as a method to simulate natural repetition artificially, the two concepts are fundamentally different. Repetition in replay strategies occurs with the same data seen in previous experiences, which is neither realistic nor biologically plausible (Gupta et al., 2010). On the other hand, natural repetition of already seen objects occurs in different real-world environments, and better fits the CIR scenario studied in this paper. Recently, Lesort et al. (2022) scaled the number of tasks in a finite-world setting (Boult et al., 2019; Mundt et al., 2022) where the model has access to a random subset of classes in each experience. The authors proposed naive fine-tuning with masking techniques to improve retained accuracy. Our work is different in the sense that we compare among different strategies and study various types of repetition with two flexible generators.
6 DISCUSSION AND CONCLUSION

We defined CIR scenarios, which represent CL environments where repetition is naturally present in the stream. Although the concept of repetition is quite intuitive, it is not obvious how to realize it in practice for research purposes. Therefore, we proposed two CIR generators that can be exploited to address this issue. Through empirical evaluations, we showed that, unlike in CI scenarios, knowledge accumulation happens naturally in CIR streams, even without applying any CL strategy. This raised the question of whether the systematic repetition provided by Replay is critical in all CIR scenarios. With several experiments on long streams, we demonstrated that although Replay provides an advantage in general, even random repetition in the environment can be sufficient to induce knowledge accumulation, given a long enough lifetime.

Moreover, we found that existing Replay strategies are exclusively designed for classical CI scenarios. Thus, we proposed a novel strategy, ER-FA, that exploits the properties of CIR scenarios. ER-FA accumulates knowledge even in streams that are highly unbalanced in terms of class frequency. ER-FA outperforms other Replay approaches by a large margin when monitoring the accuracy of infrequent classes, while preserving accuracy on the frequent ones. Overall, ER-FA guarantees a more robust performance on a wide range of real-world scenarios where classes are not homogeneously distributed over time.

The framework defined in this work opens new research directions which depart from the existing ones, mainly focused on the mitigation of forgetting in CI scenarios. We hope that our experiments and results will promote the study of CIR scenarios and the development of new CL strategies, able to exploit the inner semantics of repetition, a natural trait of real-world data streams, in many other clever and imaginative ways.
REFERENCES

Rahaf Aljundi, Min Lin, Baptiste Goujaud, and Yoshua Bengio. Gradient based sample selection for online continual learning. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper/2019/file/e562cd9c0768d5464b64cf61da7fc6bb-Paper.pdf.

Jihwan Bang, Heesu Kim, YoungJoon Yoo, Jung-Woo Ha, and Jonghyun Choi. Rainbow memory: Continual learning with a memory of diverse samples. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 8218–8227, June 2021.

Eden Belouadah and Adrian Popescu. IL2M: Class incremental learning with dual memory. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 583–592, 2019.

Terrance E Boult, Steve Cruz, Akshay Raj Dhamija, Manuel Gunther, James Henrydoss, and Walter J Scheirer. Learning and the unknown: Surveying steps toward open world recognition. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 9801–9807, 2019.

Francisco M Castro, Manuel J Marín-Jiménez, Nicolás Guil, Cordelia Schmid, and Karteek Alahari. End-to-end incremental learning. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 233–248, 2018.

Arslan Chaudhry, Marc'Aurelio Ranzato, Marcus Rohrbach, and Mohamed Elhoseiny. Efficient lifelong learning with A-GEM. In International Conference on Learning Representations, 2018.

Andrea Cossu, Gabriele Graffieti, Lorenzo Pellegrini, Davide Maltoni, Davide Bacciu, Antonio Carta, and Vincenzo Lomonaco. Is class-incremental enough for continual learning? arXiv preprint arXiv:2112.02925, 2021.

Matthias De Lange and Tinne Tuytelaars. Continual prototype evolution: Learning online from non-stationary data streams. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 8250–8259, October 2021.

Arthur Douillard, Matthieu Cord, Charles Ollion, Thomas Robert, and Eduardo Valle. PODNet: Pooled outputs distillation for small-tasks incremental learning. In European Conference on Computer Vision, pp. 86–102. Springer, 2020.

Anoopum S Gupta, Matthijs AA van der Meer, David S Touretzky, and A David Redish. Hippocampal replay is not a simple function of experience. Neuron, 65(5):695–705, 2010.

Tyler L Hayes, Giri P Krishnan, Maxim Bazhenov, Hava T Siegelmann, Terrence J Sejnowski, and Christopher Kanan. Replay in deep learning: Current approaches and missing biological elements. Neural Computation, 33(11):2908–2950, 2021.

Chris Dongjoo Kim, Jinseo Jeong, and Gunhee Kim. Imbalanced continual learning with partitioning reservoir sampling. In ECCV, 2020.

James Kirkpatrick, Razvan Pascanu, Neil Rabinowitz, Joel Veness, Guillaume Desjardins, Andrei A. Rusu, Kieran Milan, John Quan, Tiago Ramalho, Agnieszka Grabska-Barwinska, Demis Hassabis, Claudia Clopath, Dharshan Kumaran, and Raia Hadsell. Overcoming catastrophic forgetting in neural networks. Proceedings of the National Academy of Sciences, 114(13):3521–3526, 2017. ISSN 0027-8424. doi: 10.1073/pnas.1611835114. URL https://www.pnas.org/content/114/13/3521.

Hyunseo Koh, Dahyun Kim, Jung-Woo Ha, and Jonghyun Choi. Online continual learning on class incremental blurry task configuration with anytime inference. In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. OpenReview.net, 2022. URL https://openreview.net/forum?id=nrGGfMbY_qK.

Simon Kornblith, Mohammad Norouzi, Honglak Lee, and Geoffrey Hinton. Similarity of neural network representations revisited. In International Conference on Machine Learning, pp. 3519–3529. PMLR, 2019.

Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. Technical report, 2009.

Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner. Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11):2278–2324, 1998.

Timothée Lesort, Vincenzo Lomonaco, Andrei Stoian, Davide Maltoni, David Filliat, and Natalia Díaz-Rodríguez. Continual learning for robotics: Definition, framework, learning strategies, opportunities and challenges. Information Fusion, 58:52–68, 2020.

Timothée Lesort, Oleksiy Ostapenko, Diganta Misra, Md Rifat Arefin, Pau Rodríguez, Laurent Charlin, and Irina Rish. Scaling the number of tasks in continual learning. arXiv preprint arXiv:2207.04543, 2022.

Zhizhong Li and Derek Hoiem. Learning without forgetting. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(12):2935–2947, Dec 2018. ISSN 1939-3539. doi: 10.1109/TPAMI.2017.2773081.

Vincenzo Lomonaco, Davide Maltoni, and Lorenzo Pellegrini. Rehearsal-free continual learning over small non-iid batches. In CVPR Workshops, pp. 989–998, 2020.

Vincenzo Lomonaco, Lorenzo Pellegrini, Andrea Cossu, Antonio Carta, Gabriele Graffieti, Tyler L. Hayes, Matthias De Lange, Marc Masana, Jary Pomponi, Gido M. van de Ven, Martin Mundt, Qi She, Keiland Cooper, Jeremy Forest, Eden Belouadah, Simone Calderara, German I. Parisi, Fabio Cuzzolin, Andreas S. Tolias, Simone Scardapane, Luca Antiga, Subutai Ahmad, Adrian Popescu, Christopher Kanan, Joost van de Weijer, Tinne Tuytelaars, Davide Bacciu, and Davide Maltoni. Avalanche: An end-to-end library for continual learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pp. 3600–3610, June 2021.

David Lopez-Paz and Marc'Aurelio Ranzato. Gradient episodic memory for continual learning. Advances in Neural Information Processing Systems, 30, 2017.

Davide Maltoni and Vincenzo Lomonaco. Continuous learning in single-incremental-task scenarios. Neural Networks, 116:56–73, 2019.

Martin Mundt, Iuliia Pliushch, Sagnik Majumder, Yongwon Hong, and Visvanathan Ramesh. Unified probabilistic deep continual learning through generative replay and open set recognition. Journal of Imaging, 8(4):93, 2022.

German I Parisi, Ronald Kemker, Jose L Part, Christopher Kanan, and Stefan Wermter. Continual lifelong learning with neural networks: A review. Neural Networks, 113:54–71, 2019.

Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, and Christoph H Lampert. iCaRL: Incremental classifier and representation learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2001–2010, 2017.

Stefan Stojanov, Samarth Mishra, Ngoc Anh Thai, Nikhil Dhanda, Ahmad Humayun, Chen Yu, Linda B. Smith, and James M. Rehg. Incremental object learning from contiguous views. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8777–8786, 2019.

Gido M van de Ven and Andreas S Tolias. Three scenarios for continual learning. arXiv preprint arXiv:1904.07734, 2019.

Gido M van de Ven, Hava T Siegelmann, and Andreas S Tolias. Brain-inspired replay for continual learning with artificial neural networks. Nature Communications, 11(1):1–14, 2020.

Jeffrey S Vitter. Random sampling with a reservoir. ACM Transactions on Mathematical Software (TOMS), 11(1):37–57, 1985.

Yue Wu, Yinpeng Chen, Lijuan Wang, Yuancheng Ye, Zicheng Liu, Yandong Guo, and Yun Fu. Large scale incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 374–382, 2019.
1046
A RELATED WORK (CONTINUED)

Considering benchmark formalization frameworks, De Lange & Tuytelaars (2021) recently proposed a subdivision aimed at framing continual learning setups by categorizing them based on the batch and observable horizon that the learning agent can access at each time step. With this framework, the authors aim to better formalize the online learning setup. While the concept of observable horizon may be useful in evaluating the significance and local (in time) usefulness of natural repetition in a training stream, this work does not consider the concept of natural repetition in its framework.

Recently, Koh et al. (2022) proposed to introduce blurry task boundaries in class-incremental benchmarks. Their proposal builds on previous works (Bang et al., 2021; Aljundi et al., 2019) that tried to produce more realistic benchmarks by blurring the class-incremental scenario, which however resulted in a setup in which no classes are added to new tasks. They argue that this idea moves the focus too far away from the class-incremental setup and is still not quite realistic. The resulting setup, named i-Blurry, aims at resolving the aforementioned issues and moving toward a more realistic scenario by partitioning the classes available in the source dataset into two groups: Disjoint and Blurry. Classes of the disjoint group are gradually added in successive experiences, while samples of classes from the blurry group always appear in all experiences, with their numerosity controlled through a blur ratio M. The authors show that, based on the degree of disjunction N and blur M, this framework can produce class-incremental (no blurring), domain-incremental (no disjunction), and blurred setups. So far, this is the setup that has moved furthest toward introducing repetition in continual learning benchmarks in a controlled way. However, the proposed blurring mechanism is too coarse-grained to simulate a natural repetition of concepts, as the significance of the repetition introduced by blurring relies too heavily on i) a random (uniform) sampling of the concepts to be repeated, and ii) the static subdivision of classes into the Disjoint and Blurry groups.
B SLOT-BASED GENERATOR

Following the properties of CIR scenarios in Section 2, $G_{slot}$ generates a subset of CIR streams for which the assumptions below hold in the defined properties:

- $|X_i \cap X_j| = 0$: new instances appear in each experience
- $\bigcup_{i=1}^{N} X_i = X$: all samples are used
- $|Y_i \cap Y_j| \geq 0$, $\forall\, 1 \leq i, j \leq N$ where $i \neq j$
- $\bigcup_{i=1}^{N} Y_i = Y$: all classes are used
- $X$ is constant.

These assumptions allow transitioning through different CIR scenario types between the two extremes of CI and DI.
B.1 ALGORITHM

The overall steps of $G_{slot}$ are illustrated in Figure 12. In Algorithm 1 we present all steps of $G_{slot}$ used to generate arbitrary CIR scenarios given a dataset $D$, a number of experiences $N$, and a number of slots $K$. The output of the algorithm is a CL stream.

B.2 TRANSITIONING

Transitioning in $G_{slot}$ for a scenario with a fixed number of experiences can be done by increasing $K$. When $K = 1$ the generated scenario is class-incremental, and as $K$ gets closer to the total number of classes in $D$, the scenario moves towards a domain-incremental setting. In Figure 13 we show an example of how the generated scenarios change by increasing $K$.
[Figure 12: Illustration of the overall steps of $G_{slot}$: each concept is replicated $N \times K / C$ times, the sequence is shuffled, and each experience is filled with $K$ slots (example shown with $K = 4$) to build the stream $S_{train} = e_1, e_2, \ldots, e_N$. Each shape represents a concept, and the green color means that new instances of that concept are used in each experience.]
[Figure 13: From left to right: transitioning from CI to DI in $G_{slot}$. Each class is represented with a unique color; each panel plots class (vertical axis) against experience (horizontal axis).]
C SAMPLING-BASED GENERATOR

Following the properties of CIR scenarios in Section 2, $G_{samp}$ generates a subset of CIR scenarios that hold all defined properties. Additionally, for any stream $S = \{e_1, e_2, \ldots, e_N\}$, $G_{samp}$ defines a probability distribution for the first occurrence of concepts over $S$ and per-class repetition probabilities for each concept $c \in Y$. $G_{samp}$ can generate arbitrarily long streams ($N \geq 1$), even from a growing set of samples $X$ where $Y$ remains constant.

C.1 ALGORITHM

The overall steps of $G_{samp}$ are shown in Algorithm 2.
C.2 DISTRIBUTION TYPES

In this section we show some examples of different discrete distributions that can be used for $P_f(S)$ and $P_r$. For $P_r$ we use the unnormalized version of the final distribution. The distributions used for $G_{samp}$ can be any arbitrary discrete distributions and are not limited to the ones we describe here.

C.2.1 ZIPFIAN

Given the number of elements $N$ and a scalar $e \geq 0$, the probability mass function of a Zipfian distribution over a list of $N$ elements is defined in Equation 1. When used for the probability of first occurrence, the distribution can be defined over the experiences of a stream. For example, $N$ can be considered as the number of experiences and $i$ can indicate the $i$-th experience in the stream. By increasing $e$, the distribution over the stream becomes skewed towards the beginning. In Figure 14 we demonstrate some examples of first-occurrence probabilities over a stream of length 10, generated with the Zipf distribution for increasing values of $e$.
Algorithm 1 Slot-Based Generator (G_slot) Pseudo-Code.

Require: Dataset D = {(x_i, y_i)}_{i=1,...,P} with C classes; number of experiences N; number of slots K present in each experience.
Ensure: K <= N
Ensure: C mod N = 0
Ensure: N*K mod C = 0

  cls-idxs = {}                                  # Empty dictionary
  for y in set({y_i}_{i=1,...,P}) do
      cls-idxs[y] = []                           # Empty list init
  end for
  for i = 1, ..., P do
      cls-idxs[y_i].append(i)
  end for
  slots = {}                                     # Empty dictionary
  for y in cls-idxs do
      slots[y] = []
      ksample = int(len(cls-idxs[y]) / K)
      for k = 1, ..., N*K/C do
          subset-idxs = pop(cls-idxs[y], ksample)
          subset-samples = [x_idx for idx in subset-idxs]
          slots[y].append(subset-samples)
      end for
  end for
  stream = []
  for n = 1, ..., N do
      experience = dataset()
      seen-classes = []
      for k = 1, ..., K do
          repeat
              y = sample(slots)
          until y not in seen-classes
          seen-classes.append(y)
          experience.add(pop(slots[y], 1))
      end for
      stream.append(experience)
  end for
  return stream
[Figure 14: Zipf distribution with varying values of $e \in \{0.0, 0.5, 1.0, 1.5, 2.0\}$. Each panel plots the first-occurrence probability against the experience index (0-10).]
Many natural distributions follow the Zipf distribution, and it can be used to generate highly skewed distributions both for first-occurrence and repetition probabilities.

$$f(i; e, N) = \frac{1/i^e}{\sum_{n=1}^{N} 1/n^e} \quad (1)$$
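As a quick illustration of Equation 1, here is a short Python snippet (our own, using numpy) that computes the first-occurrence probabilities over a stream:

    import numpy as np

    def zipf_pmf(n_experiences, e):
        # Equation 1: probability of first occurrence for experiences 1..N.
        ranks = np.arange(1, n_experiences + 1, dtype=float)
        weights = 1.0 / ranks ** e
        return weights / weights.sum()

    # e = 0 yields a uniform distribution; larger e skews first occurrences
    # towards the beginning of the stream (cf. Figure 14).
    print(zipf_pmf(10, 2.0))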
Algorithm 2 Sampling-Based Generator (G_samp) Pseudo-Code.

Require: Dataset D = {(x_i, y_i)}_{i=1,...,P} with C classes; number of experiences N; number of slots K; probability distribution for first occurrence P_f(S); list of repetition probabilities P_r.

  T = {0}^{C x N}                                # Initialize occurrence matrix with zeros
  for c in {0, 1, ..., C} do
      i ~ P_f(S)                                 # Sample the first occurrence of class c
      T[c, i] = 1
      for j in {i, i+1, ..., N} do
          r ~ U(0, 1)                            # U: uniform distribution over [0, 1.0]
          if r < P_r[c] then
              T[c, j] = 1
          end if
      end for
  end for
  E = {}
  for e_i in {1, 2, ..., N} do
      C_i = RetrieveClasses(e_i)
      D_{e_i} = Sample(D, C_i, S)                # Sample S instances from dataset D for classes C_i
      E <- E union D_{e_i}
  end for
  S_train, S_test = GenerateStream(E)            # Generate streams using E
  return S_train, S_test
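The core of Algorithm 2 is the binary occurrence matrix T. Below is a minimal Python sketch of that step, reusing zipf_pmf from the snippet above (names and the numpy-based sampling are our own assumptions; the instance-sampling and stream-generation steps are omitted):

    import numpy as np

    def sample_occurrence_matrix(n_classes, n_experiences, p_first, p_repeat, seed=0):
        # T[c, j] = 1 iff class c is present in experience j.
        rng = np.random.default_rng(seed)
        T = np.zeros((n_classes, n_experiences), dtype=int)
        for c in range(n_classes):
            first = rng.choice(n_experiences, p=p_first)  # first occurrence of c
            T[c, first] = 1
            # After its first occurrence, class c reappears independently in
            # each later experience with probability p_repeat[c].
            T[c, first + 1:] = rng.random(n_experiences - first - 1) < p_repeat[c]
        return T

    T = sample_occurrence_matrix(10, 20, zipf_pmf(20, 1.0), [0.3] * 10)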
C.2.2 POISSON

The PMF of the Poisson distribution is given in Equation 2, where $\mu \geq 0$. A Poisson distribution with larger values of $\mu$ can be used for distributions where the probability of occurrence/repetition first rises and then gradually decreases over time.

$$f(i; \mu) = \frac{\mu^i e^{-\mu}}{i!} \quad (2)$$
[Figure 15: Poisson distribution with varying values of $\mu \in \{0.0, 0.5, 1.0, 1.5, 2.0\}$. Each panel plots probability against the experience index (0-10).]
C.2.3 GEOMETRIC

Another useful distribution for the first-occurrence probabilities over a stream is the Geometric distribution, with its PMF given in Equation 3. This distribution is particularly interesting for transitioning from domain-incremental to class-incremental. By setting $p = 1$, only the probability of experience $i = 0$ will be equal to 1.0 and the rest will be zero, and by decreasing $p$, the probability spreads over the stream. In Figure 17 we show examples of scenarios generated with $G_{samp}$ using a Geometric first occurrence and a fixed probability of repetition.

$$f(i; p) = (1 - p)^{i-1} p \quad (3)$$
[Figure 16: Geometric distribution with varying values of $p \in \{0.01, 0.2, 0.5, 0.7, 1.0\}$. Each panel plots probability against the experience index (0-10).]
[Figure 17: Scenarios generated with a Geometric first occurrence and a probability of repetition equal to 1.0 for all classes. The $p$ values for the Geometric distributions are, from left to right, 0.01, 0.2, and 1.0. Each panel plots class against experience.]
D UNBALANCED SCENARIOS

In this section we present a particular type of unbalanced scenario in which a subset of classes in the stream have a low probability of repetition while the rest repeat very often. We refer to such scenarios as bi-modal scenarios, where each mode refers to a subset of classes with a distinct repetition probability. More specifically, we have a stream of experiences $S = \{e_1, e_2, \ldots, e_N\}$ where $Y^S = \bigcup_{i=1}^{N} Y_{e_i}$ indicates the set of all available concepts in $S$. In bi-modal scenarios $Y = Y^{if} \cup Y^{fr}$, where $Y^{if}$ and $Y^{fr}$ are the sets of infrequent and frequent concepts, respectively, and $Y^{if} \cap Y^{fr} = \emptyset$. In Figure 18 we show examples of unbalanced bi-modal scenarios.
[Figure 18: Unbalanced scenarios with two modes of repetition. The fractions of infrequent classes are, from left to right, 0.2, 0.4, and 0.6. Repetition probabilities for frequent and infrequent classes are set to 0.9 and 0.2, respectively. Each panel plots class against experience.]
E FREQUENCY-AWARE REPLAY

E.1 ALGORITHM

In Algorithm 3 we present the steps for updating the buffer in the FA storage policy.

E.2 ANALYSIS: VARYING THE FRACTION OF INFREQUENT CLASSES

In this section, we study the behavior of the FA, CB, and RS storage policies when changing the fraction of infrequent classes. In our analysis, we consider an unbalanced stream generated with $G_{samp}$ where $N = 100$ and the probabilities of repetition for frequent and infrequent classes are 0.9 and 0.1, respectively. In such streams, the large probability gap between frequent and infrequent classes
Algorithm 3 Frequency-Aware Buffer.

Require: Current buffer set B; maximum buffer size M; list of seen classes C; number of observations per seen class O.

  D = GetExperienceDataset(e_i)
  P = DetectPresentClasses(D)
  C <- C union P
  for c in P do                                  # For each present class, increment the number of observations
      if c in O then
          O[c] += 1
      else
          O[c] = 1
      end if
  end for
  Q = [1/O[c] for all c in C]                    # Calculate quota per class
  Q_hat = Q / |Q|                                # Normalize quota values
  S = {ceil(Q_hat[c] * M) for all c in C}        # Calculate buffer slot size for each class
  UpdateSlots(S)                                 # Update assigned slots according to the current state of B
  UpdateBuffer(B, D, S)
  return B, M, C, O
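The quota computation is the heart of the FA policy: classes observed less often receive proportionally more buffer slots. Below is a minimal Python sketch of this step under our own naming (the slot and buffer update bookkeeping is omitted):

    import math
    from collections import Counter

    def frequency_aware_slots(observations, buffer_size):
        # observations: class -> number of experiences in which it appeared.
        quota = {c: 1.0 / n for c, n in observations.items()}  # inverse frequency
        total = sum(quota.values())                            # normalization
        return {c: math.ceil(q / total * buffer_size) for c, q in quota.items()}

    obs = Counter({"frequent_a": 9, "frequent_b": 9, "rare": 1})
    print(frequency_aware_slots(obs, 500))  # the rare class receives most slots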
[Figure 19: Ratio of samples for infrequent classes in unbalanced scenarios for the FA, CB, and RS storage policies over 100 experiences. The fractions of infrequent classes, from top-left to bottom-right, are 20%, 40%, 60%, 80%, and 100%.]
helps us observe the difference more clearly. We report the ratio of samples assigned to infrequent classes in the buffer over the lifetime of the model for scenarios where the fraction of infrequent classes is equal to {20%, 40%, 60%, 80%, 100%}. For this experiment, we set the buffer size to 500 for all methods.

As demonstrated in Figure 19, when the fraction of infrequent classes is equal to 20%, i.e., only 20% of the classes are infrequent, the ratio is very low for the RS policy as it tries to replicate the true distribution of the stream, while CB assigns exactly 20% of the buffer space to the infrequent samples. However, we can observe that FA assigns more samples to the infrequent classes over time, as it adapts the buffer slots based on the frequency of repetition. Moreover, it is evident in the plots that, by increasing the fraction of infrequent classes, the ratio gap between FA and CB gets smaller, since the quota for CB stays the same while the number of infrequent classes increases. Eventually, when the fraction of infrequent classes is equal to 100%, i.e., all classes have the same (low) probability of repetition, all buffers have exactly the same ratio since all classes are infrequent.

In conclusion, FA buffer slots can be very helpful in highly unbalanced streams where a smaller fraction of classes have a low probability of repetition. When the stream moves towards becoming balanced, FA and CB get closer, and all methods become similar in the extreme case of a fully balanced stream with a uniform probability of repetition.
F CHANGING $P_f(S)$

[Figure 20: Average test accuracy over 500 experiences for different values of $p$ in the first-occurrence distribution ($p \in \{0.1, 0.3, 0.7, 1.0\}$), for the ER-CB (left) and Naive (right) strategies.]
To analyze the role of the first-occurrence type, we conduct experiments comparing situations in which all classes occur early in the stream against those in which new classes also appear late. How early or late in the stream we observe all classes of a dataset depends on the parameters that control $P_f(S)$. In this experiment, we fix the probability of repetition $P_r$ and change the parameters of $P_f(S)$. In particular, we opt for the Geometric distribution for $P_f(S)$ and choose the values {0.1, 0.3, 0.7, 1.0} for its only parameter $0 < p \leq 1.0$. Increasing $p$ is inversely proportional to the spread factor in the first-occurrence distribution, i.e., when $p$ is close to 1.0 all classes occur in the first experiences, and as we decrease $p$ towards 0 the classes spread out along the stream. Figure 20 shows the CIFAR-100 results for the Naive and ER-CB strategies. The results suggest that when the spread factor is low, the model initially has difficulty learning, since there are more classes in the initial experiences and thus the model has to learn from fewer instances per class. However, with more experiences, all first-occurrence types reach almost the same SCA.
G ER-FA RESULTS

Results in Figure 21 illustrate the total test accuracy and the accuracy of frequent classes over time. Although the discrepancy between the accuracies on frequent classes is very small, the total test accuracy can vary significantly due to the difference in the accuracy of infrequent classes, as presented in Section 4.4.

[Figure 21: Test accuracy over all classes (left) and over frequent classes (right) in a bi-modal unbalanced scenario with Fraction = 0.3, for the ER-FA, ER-CB, ER-RS, and Naive strategies over 100 experiences.]
4tFIT4oBgHgl3EQf7Ctv/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
89AzT4oBgHgl3EQf-_4J/content/tmp_files/2301.01940v1.pdf.txt ADDED
@@ -0,0 +1,943 @@
Enabling Augmented Segmentation and Registration in Ultrasound-Guided Spinal Surgery via Realistic Ultrasound Synthesis from Diagnostic CT Volume

Ang Li†, Jiayi Han†, Yongjian Zhao, Keyu Li, Li Liu‡
Abstract—This paper aims to tackle the issues of unavailable or insufficient clinical ultrasound (US) data and meaningful annotation to enable bone segmentation and registration for US-guided spinal surgery. While US is not a standard paradigm for spinal surgery, the scarcity of intra-operative clinical US data is an insurmountable bottleneck for training a neural network. Moreover, due to the characteristics of US imaging, it is difficult to clearly annotate bone surfaces, which causes the trained neural network to miss fine details. Hence, we propose an in silico bone US simulation framework that synthesizes realistic US images from diagnostic CT volumes. Afterward, using these simulated bone US images, we train a lightweight vision transformer model that achieves accurate, on-the-fly bone segmentation for spinal sonography. In the validation experiments, the realistic US simulation was derived from diagnostic spinal CT volumes to facilitate a radiation-free US-guided pedicle screw placement procedure. When employed for training the bone segmentation task, the Chamfer distance achieves 0.599 mm; when applied to CT-US registration, the associated bone segmentation accuracy achieves 0.93 in Dice, and the registration accuracy based on the segmented point cloud is 0.13~3.37 mm in a complication-free manner. While bone US images exhibit strong echoes at medium interfaces, a model relying only on small-neighborhood information may be unable to distinguish thin tissue interfaces from bone surfaces. To overcome this shortcoming, we propose a Long-range Contrast Learning Module (LCLM) to fully explore the long-range contrast between candidate pixels and their surrounding pixels. In the ablation experiments, it is verified that the proposed Long-range Contrast Learning Module is effective for precise positioning of the US region of interest. On top of that, the training data is entirely generated by our proposed US simulation framework, without fine-tuning on real clinical data, which demonstrates the effectiveness of the realistic bone US simulation framework.

Note to Practitioners—The motivation of this paper is to address the issues of unavailable or insufficient bone US images and annotation labels. We employ a data augmentation technique to generate realistic simulated bone US images and annotations associated with the corresponding CT volume. The problems of current US data augmentation approaches are mainly the inability to generate continuous, context-accurate data (neural network-based algorithms) and the lack of real-time physical simulation capability (traditional acoustics-based algorithms). In this paper, we enhance CT-derived US simulation by supplementing techniques such as extrusion simulation, moving-space simulation, and image augmentation to yield higher-quality images, and we apply these images directly to the target task, i.e., training a neural network to guide US-guided spinal surgery. Besides, we demonstrate the effectiveness of the realistic US simulation framework and of each new module of the neural network using ablation experiments. It can be concluded that a neural network trained with the data generated by the US simulation framework promises to enable US-guided pedicle screw placement procedures. In future work, we will use the in silico bone US simulation as a reinforcement learning environment and deploy the trained agents to directly guide US-guided procedures.

† These authors contributed equally.
‡ Corresponding author. Email: [email protected]
Ang Li, Yongjian Zhao, Keyu Li, and Li Liu are with The Chinese University of Hong Kong.
Jiayi Han is with Fudan University.

Index Terms—Realistic US Simulation, Vision Transformer, Bone Surface Segmentation, CT-US Registration, US-Guided Spinal Navigation
I. INTRODUCTION

Segmentation of bone surfaces from intra-operative US data followed by CT-US registration are two critical steps for US-guided spinal surgery. Recent research has focused on the use of deep learning-enabled methods for accurate, robust, and real-time segmentation and registration of bone surfaces. However, the scarcity of data, due to a lack of standardized data and patient privacy concerns, is a major challenge in applying deep learning-enabled methods in the intra-operative imaging field. This is specifically challenging because US is not a standard imaging modality in spine-related surgeries and US-guided spinal surgeries are not common; even if spinal sonography can be collected in pre-clinical procedures, annotation would still be a severe challenge due to the vast amounts of data. Another limiting factor is the manual data acquisition process: sub-optimal orientation of the US transducer with respect to the imaged spinal anatomy will result in the user-dependent acquisition of low-quality bone scans.

Increasing the size of existing datasets through data augmentation in order to improve models' performance has been extensively investigated. Among these approaches, a fundamental way to obtain a large labeled dataset is in silico realistic simulation of spinal US images. US simulation methods can be broadly categorized into three types: acoustic model-based US simulation, image-based US simulation, and generation of virtual images using a generative adversarial network (GAN). US generation algorithms based on acoustic models are usually very slow [1]. Generating US with a GAN may require training different networks for different organs; meanwhile, obtaining corresponding US and CT volume pairs aligned in the same space is difficult, making the generation of large-scale synthetic US images using GANs a challenging task. Image-based approaches attempt to utilize a simulated

arXiv:2301.01940v1 [eess.IV] 5 Jan 2023
Fig. 1. Examples of generated ultrasound images: the junctional surface of two vertebrae, the plane where the vertebral plate can be entirely visualized, the plane swept along the spinous process, and an arbitrary scan plane, respectively.
US probe to re-sample the original image (such as a CT scan), treating the image intensities as acoustic parameters of the organs and then simulating propagation with acoustic properties. Nonetheless, unlike other imaging modalities such as CT and MRI, US is significantly influenced by gas. To avoid its influence, the operator presses the probe to remove the gas between the probe and the skin, which distorts the tissues relative to the original images. However, this distortion has been ignored by previous researchers. Moreover, because each tissue has a different reflection rate, previous works segment the image and design specific transfer functions for each tissue, which is time-consuming. In addition, the conventional use of US image generation is only to assist 2D-3D image registration [2] or to train examining physicians [3], without further application of the synthetic US images.

In this paper, we propose a novel CT-derived realistic US synthesis framework incorporating automated image generation with sampling methods, as shown in Fig. 3, where each column, from (a) to (d), represents the US, the corresponding CT scan, the reflection map, and the transmission map, respectively. We simulate the distortion caused by pressing the probe by warping the original image, and propose an adaptive transfer function that can be applied directly to the whole image, which eliminates the per-tissue transfer function design process and greatly speeds up the US simulation task. To fully take advantage of the proposed simulation and conduct real-time US image segmentation, we further propose a lightweight vision transformer with a Long-range Contrast Learning Module (LCLM), which utilizes designed cascaded dilated convolution layers to achieve a dense, super-large receptive field that enhances US image segmentation. Experiments demonstrate that the proposed simulation system achieves state-of-the-art performance compared with other approaches and benefits the proposed vision transformer for real US image segmentation.

Our contributions are listed as follows:
1) We propose an in silico bone US simulation framework that synthesizes realistic US images from diagnostic CT volumes.
2) We develop a lightweight vision transformer model that achieves precise and real-time bone segmentation for spinal sonography images.
3) Experiments demonstrate that the proposed in silico bone US simulation approach dramatically enhances segmentation performance in comparison with initial CT scans, indicating that the proposed data augmentation method is capable of pre-training models for real clinical spinal sonography.
II. RELATED WORK

In this section, the related work on realistic US simulation and associated bone segmentation is discussed.
A. Realistic US Simulation

Traditional acoustics-based US simulation software was pioneered by Jensen et al. [4] in 1996. A k-space method for the fast computation of pulsed photo-acoustic fields was proposed by B. T. Cox et al. [5] in 2005, and Treeby et al. [6] optimized the US propagation model based on the k-space method, from which the widely used k-Wave tools were developed. The advantage of these conventional acoustics-based algorithms is that they can simulate various types of US imaging systems with physical effects that are as realistic as possible.

As these algorithms based on physical models of acoustics are too inefficient to generate large datasets, researchers started to develop ray-tracing-based approaches. Burger et al. [7] developed a US simulation system by segmenting the CT dataset into different tissues and then assigning velocity, impedance, scattering factor, and other acoustic properties to each tissue, which were then used to simulate the sound propagation, absorption, and scattering processes. Cong et al. [8] proposed a multi-scale enhancement method that augments tubular structures to simulate blood flow and make the US images more realistic. Piorkowski et al. [3] in 2013 applied the algorithms of Wein [9] and Kutter [10] to build a Transesophageal Echocardiography (TEE) simulator, which has a tremendously positive effect on training doctors to perform TEE examinations. To further accelerate image generation, Wang et al. [11] used NVIDIA's OptiX 6.0 ray-tracing engine to perform Monte Carlo simulation of US, with good results compared to GAN-based methods and Field II [12].

In recent years, generative adversarial network (GAN) models have been used extensively in realistic US simulation research; a GAN generates realistic US images after learning from a large dataset in the US domain. Hu et al. [13] applied a GAN model to yield US images for freehand scanning: the model takes spatial location information as a conditional input and outputs the US image at the current location. This work produces US data for the corresponding location, yet it cannot create synthetic US images for each specific patient. To address this shortcoming, in 2018 Tom et al. [14] employed a cascaded GAN with an image segmentation label as conditional input to create more realistic US images and to produce synthetic US images for different segmentation results. Nonetheless, according to their report, it can be concluded that even with precise segmentation, GAN-based virtual US systems still have difficulty matching the edge intensity and shape of real US images.
B. US Segmentation

Some early studies utilized handcrafted features for US segmentation, such as active contours [15], [16]. In recent years, the most popular model in US segmentation has been UNet [17], and many works adopt different enhancements of UNet-based models for US segmentation. [18] proposed a multi-task UNet which combines classification and segmentation tasks. [19] proposed a lightweight UNet model which alternately adopts 3 x 3 and 1 x 1 convolution layers; they also introduced a false output suppression mechanism that combines patch-wise classification and segmentation results to eliminate false positives. [20] adopts spatial attention for the US image segmentation task.
III. METHOD

A. Realistic US Simulation from CT Volume

The entire US simulation process is divided into several parts: data sampling, data filtering, US transmission, and image blending.
a) Image Probe and Press Simulation: Since the goal of the proposed US simulation system is to train doctors or artificial intelligence agents for surgical robots, the probe's moving space cannot be the entire 3D space. The probe can move within the provided scanning space, but because the probe face is a curved surface, it will not fit the scanning space perfectly. If pressure is simulated, the scanning space will wrap around and fit the probe; however, the CT volume will not deform with the surface. Hence, an algorithm is needed to make the CT data deform with the probe surface along the scanning space. Using a spring-mass model to simulate deformations in the scanning space, together with the moving least squares algorithm to fit the CT deformation, is certainly effective, but it has two demerits. First, solving the spring-mass simulation and the moving least squares equations consumes a lot of computational resources. Second, the algorithm requires substantial extra work to mark anchor points on the images. Instead, we adapt the local translation image warping algorithm proposed by A. Gustafsson to CT data and apply it to the image with the shape of the probe. The algorithm only needs the shape of the probe and the HU values of the CT slice to simulate the motion of the tissues. If further acceleration is desired, the weight of the HU parameter can be set to 1; the UV coordinate mapping then only needs to be calculated once, without affecting the imaging efficiency at all.

The equation is formulated as Equ. 1:

$$\vec{u} = \vec{x} - \frac{r_{max}^2 - |\vec{x}-\vec{c}|^2}{r_{max}^2 - |\vec{x}-\vec{c}|^2 + D}\,(\vec{m} - \vec{c}), \qquad D = \frac{100}{f}\,\alpha(hu)\,|\vec{m} - \vec{c}|^2 \quad (1)$$
In the above equation, f controls the ratio of deformation. In our case, we want to push the intersection point of the center-line of the probe and the top line of the sampled image (the blue dot in Fig. 2) to the top of the probe (the red dot in the figure). According to the algorithm above, all the tissue between the blue and red dots needs to be pressed downwards. Assuming that the tissue is a rigid body, its thickness does not change even when pressed with a very large force; in this case, the content originally at the red dot should be pushed to the top of the green curve. A green arc can then be drawn representing the limit of tissue movement. The squeezing of the tissue in the area between the probe arc and the green arc will cause high reflections or absorption in this area. Hence, when simulating transmission, it is important to make the sound waves attenuate less in this area; otherwise, the squeezed tissue generated by the algorithm would completely block the transmission of US. As can be seen in Fig. 3, there are a few bright lines below the probe curve, which are the signal of the squeezed tissue, consistent with in vivo US images.
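To make the warp concrete, the following is a minimal Python sketch of the per-pixel displacement under the reconstructed form of Equation 1 (the exact sign convention and the alpha(hu) weighting are assumptions drawn from the text; all names are illustrative):

    import numpy as np

    def press_warp_source(x, c, m, r_max, f, alpha_hu):
        # x: pixel coordinate; c: warp center; m: point the center is pushed to;
        # r_max: radius of influence; f: deformation ratio; alpha_hu: HU weight.
        d2 = float(np.sum((x - c) ** 2))
        if d2 >= r_max ** 2:
            return x  # outside the radius of influence: no deformation
        D = 100.0 / f * alpha_hu * float(np.sum((m - c) ** 2))
        w = (r_max ** 2 - d2) / (r_max ** 2 - d2 + D)
        return x - w * (m - c)  # source coordinate to sample for pixel x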
Fig. 2. Illustration of the pressure simulation algorithm. In the enlarged part of the dashed box, the yellow arc represents the shape of the probe, and the area between the yellow arc and the green arc is the target space being squeezed into.
b) Restricted Movement Space of the Probe: As mentioned above, the probe cannot move in space arbitrarily. To automate US image acquisition, two types of probe movement regulation are designed in this paper, using spinal US as an example. Define M as a mesh in 3D space; clipping a 3D sub-mesh Mr
Fig. 3. (a) CT images after being processed by the pressure simulation; the skin and muscles are compressed into the probe's curved surface. (b) The virtual US image produced with simulated extrusion. (c) The reflection map after extrusion. (d) The processed image does not affect the propagation.

Fig. 4. The green part of the figure is the object to be scanned. (a) The relationship between the entire feasible space (skin) and the scanned object (spine). (b) The first type of movement: the yellow dot represents the tip of the probe and the arrows represent the two degrees of freedom of movement on the manifold. (c) The second type of movement: the red line represents the discrete surface, the dashed arrow indicates that the probe can switch between different curves, and the solid arrow indicates that the probe can move along the curve direction.
of interest by the bounding box of the scanning target, the movement of the probe is only meaningful within this manifold. The first way is to move freely on the grid: for any point p on Mr, the direction of movement of p is curved along the grid normal vector. In this case, a very small dx or dy will not produce a large dz, so the image does not change dramatically and continuous images can be acquired. The second method is to slice Mr into a series of curves in 3D space along the forward direction, Mr0, ..., Mrn. On each polyline, the points can only move forward or backward along the tangent direction by a certain distance. This has the merit of keeping the components of the probe's normal vector consistent along that curve, which is often used for automatic circular or flat scanning of a target, ensuring that the swept target lies on a line or a point. Moreover, both of these movement methods can be employed by a reinforcement learning-based agent; the first method is more suitable for continuous control, while the second method is more suitable for discrete control.
c) Sound Reflection: The first step in the simulation is to map the CT-HU image into an acoustic impedance image, with the mapping look-up table taken from [21]. In the mapping table, each HU value is mapped to an acoustic impedance Z. This acoustic impedance image is then processed instead of the CT image. Based on the physical transfer properties of sound, we first introduce the Fresnel equations to calculate the reflection. Dividing sound waves into components parallel and perpendicular to the plane of incidence, the equations are:

$$R_{\perp} = \left(\frac{Z_1\cos\theta_i - Z_2\cos\theta_t}{Z_1\cos\theta_i + Z_2\cos\theta_t}\right)^2, \quad R_{\parallel} = \left(\frac{Z_2\cos\theta_i - Z_1\cos\theta_t}{Z_2\cos\theta_i + Z_1\cos\theta_t}\right)^2, \quad R = \frac{1}{2}(R_{\perp} + R_{\parallel}) \quad (2)$$

When a sound wave is transmitted from one medium to another, it is not only reflected but also refracted. This refraction has the same physical properties as the refraction of light passing through different media, which is given by Snell's law:

$$\frac{\sin\theta_1}{\sin\theta_2} = \frac{Z_2}{Z_1} \quad (3)$$
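A small Python helper combining Equations 2 and 3 (our own sketch; total internal reflection is crudely handled by clipping):

    import numpy as np

    def fresnel_reflection(z1, z2, theta_i):
        # z1, z2: acoustic impedances; theta_i: incidence angle in radians.
        sin_t = np.clip(np.sin(theta_i) * z1 / z2, -1.0, 1.0)  # Snell's law (Eq. 3)
        cos_i, cos_t = np.cos(theta_i), np.sqrt(1.0 - sin_t ** 2)
        r_perp = ((z1 * cos_i - z2 * cos_t) / (z1 * cos_i + z2 * cos_t)) ** 2
        r_par = ((z2 * cos_i - z1 * cos_t) / (z2 * cos_i + z1 * cos_t)) ** 2
        return 0.5 * (r_perp + r_par)  # unpolarized reflection (Eq. 2)

    # Muscle (1.647e6) to a bone-like impedance (assumed ~7.8e6), normal incidence:
    print(fresnel_reflection(1.647e6, 7.8e6, 0.0))  # strong reflection at bone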
In fact, when a sound wave is refracted or bent, its direction is kept until the next transmission. It is possible to simulate multi-level refraction and reflection values using a recursive ray-tracing algorithm. However, since the amplitude of the refracted sound waves is small and has little effect on the generated image quality, the proposed method does not track the direction after refraction and only superimposes the amplitude on the incident sound wave of the next medium.

Irradiance: Lambert's cosine theorem is introduced to take the irradiance of the sound wave into consideration. Consistent with the equation below, the smaller the angle between the tissue gradient and the sound wave direction, the stronger the sound reflection and the brighter the tissue.

$$\frac{I_r}{I_i} = \cos\theta \quad (4)$$
d) Sound Attenuation: Attenuation of US propagating through tissue can be broadly divided into reflection, scattering, and absorption, with absorption accounting for the majority. The paper [22] states that US absorption in substances such as bovine and porcine liver accounts for 90% or even 100% of US propagation attenuation. Even in tissues that exhibit anisotropy, such as the bovine brain and leg muscle, absorption still makes a major contribution to the attenuation. Therefore, mapping the CT scan to an absorption image is important. The mapping table is obtained by interpolating the data in [23]. The formula for absorption is given by the Lambert-Beer law. Since different tissues may not have the same US absorption capacity at the same HU value, an adjustable parameter $\alpha$ is added to the original equation to obtain better generated images. The modified equation is as follows:

$$I_a = I_0 \cdot 10^{-\alpha \cdot d \cdot f \cdot \frac{1}{10\beta}} \quad (5)$$
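In code, the modified Lambert-Beer absorption of Equation 5 is a one-liner; the sketch below (our own, with illustrative parameter names and assumed units) shows the idea:

    def lambert_beer_attenuation(i0, alpha, d_cm, f_mhz, beta=1.0):
        # i0: incident intensity; alpha: attenuation weight (dB/cm/MHz);
        # d_cm: path length in cm; f_mhz: frequency in MHz; beta: tunable factor.
        return i0 * 10.0 ** (-alpha * d_cm * f_mhz / (10.0 * beta))

    # Muscle (~1.47 dB/cm/MHz) at 5 MHz over 5 cm:
    print(lambert_beer_attenuation(1.0, 1.47, 5.0, 5.0))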
TABLE I
Acoustic properties of different tissues, with density (kg m^-3), velocity (m s^-1), impedance (10^6 kg m^-2 s^-1), and attenuation (dB cm^-1 MHz^-n) [23].

Tissue | Density | Velocity | Impedance | Attenuation
Skin   | 1100    | 1631     | 1.794     | 0.22
Fat    | 916     | 1435     | 1.352     | 0.975
Muscle | 1041    | 1595     | 1.647     | 1.47

Fig. 5. Illustration of propagation.
e) Propagation in a Discrete Field: The propagation of the US is the most time-consuming part of the generation process. In particular, the steps of finding the receiver tissues for reflection and refraction spend a lot of time on the frequently occurring intersection tests.

At the same time, the directions towards adjacent pixels are discrete, whereas the direction of acoustic transmission is continuous, so it may happen that line segments intersect while the pixels have no common point. Each pixel among the eight neighbors should therefore be regarded as an obstacle to the wave propagation, and the normalized cosine of the angle between the propagation direction and the direction towards that pixel is used as the obstacle weight. That is, if the connection direction is opposite to the direction of propagation, the weight is zero, and the obstacle weight is maximized when the directions are exactly the same.

In order to easily calculate the amplitude enhancement due to reflection and refraction, each point also propagates its wave to its neighborhood. In this case, a negative cosine between the pixel connection direction and the propagation direction is considered reflection, while cosine values less than 0.8 (a hyper-parameter) are considered contrast enhancement by refraction and scattering. A schematic diagram of the propagation is shown in Fig. 5.
f) Reflection Enhancement: When the transducer receives the sound waves reflected from a medium gap, a bright stripe appears in the US image. Because reflection only occurs where the properties of the medium change, there should be only one reflection between the bone surface and the muscle tissue, yet the US image often shows a very thick bright stripe between bone and muscle. Hacihaliloglu et al. [24] propose that this bright stripe is produced by the thickness of the US beam in the elevational direction. Based on this theory, this paper samples 3D gradient textures in front of and behind the current plane (the blue line in Fig. 6). These textures are multiplied by a weight value alpha, which is inversely proportional to the distance, and then accumulated into the currently generated US image. Simulating the thick stripes is significant for neural network training; if the thick stripes are not present in the training data, the output of the model will deviate from the ground truth when segmenting the bone surface on real US data.

In summary, the generated US image is obtained by combining the propagation image with the reflection image and the absorption image, and then blending these three images together with radial noise.
B. US Image Segmentation

In this work, we propose a lightweight segmentation framework for US segmentation, as shown in Fig. 7. We follow [25] as the backbone. After each MobileViT block, we add an LCLM for further long-range contrast learning. We then utilize a feature pyramid network (FPN) to recover the initial resolution of the input US image.

a) MobileViT block: To cover global information, the MobileViT block first utilizes a 3x3 convolution to aggregate the neighboring features. The pixels are then grouped into patches, where each patch contains h x w pixels, with h and w representing the height and width of the patch. The i-th token
Fig. 6. The thick stripe is indicated by the arrow in (a). (b) How the thickness of the US beam induces the thick stripe artifact. (c) A schematic of sampling in a 3D gradient texture: the blue line indicates the imaging plane, and the reflection values between the blue line and the yellow lines are weighted and accumulated into the imaging plane. (d) The simulated thick stripes in the system.

Fig. 7. The framework of the proposed lightweight vision transformer for US segmentation. In the first two stages, we only utilize basic convolution layers to learn local representations. In the last three stages, we implement the MobileViT block and LCLM alternately to cover both long-range dependency and long-range contrast.
of each patch exchanges features with the corresponding tokens of the other patches via self-attention (SA). When h <= 3 and w <= 3, each pixel can access information from all pixels. Details of the MobileViT block can be found in [25].
b) Why is LCLM needed: We find that the region of interest (RoI) in US images has textures similar to other tissues but weaker amplitude. Basic 3x3 convolutions are capable of modeling the neighboring texture but fail to learn long-range textures. As the local texture of the to-be-segmented region is similar to that of other tissues, we have to design a module that covers the texture both within and outside the to-be-segmented region.

In vision transformers, long-range texture modeling is achieved by self-attention (SA) over all tokens, as in Equ. 6:

$$\hat{x} = \mathrm{softmax}\left(\frac{xW_q(xW_k)^T}{d}\right)(xW_v), \quad (6)$$

in which $x \in \mathbb{R}^{N \times d}$ represents the $N$ input tokens with dimension $d$, and $W_q \in \mathbb{R}^{d \times d_q}$, $W_k \in \mathbb{R}^{d \times d_q}$, and $W_v \in \mathbb{R}^{d \times d_v}$ are learnable parameters. We rewrite the self-attention formula as in Equ. 7:

$$\hat{x}_i = \sum_{j \in A_i} w_j (x_j W_v), \qquad w_j = \frac{\exp(x_i W_q (x_j W_k)^T / d)}{\sum_t \exp(x_i W_q (x_t W_k)^T / d)}, \quad (7)$$

where $A_i$ represents the set of accessible tokens of token $x_i$. As $W_q$ and $W_k$ only contribute to modeling the relationships among queries, once $W_v$ is fixed, the space generated by $x$ is therefore limited. Meanwhile, as shown in Fig. 8(a), SA tends to aggregate "similar" features from the tokens, which helps the model learn long-range dependency. However, because of the nature of US images, long-range contrast is also important to learn, and SA fails to do so.

The convolution operation can be formulated in the same form as SA, as shown in Equ. 8:

$$\hat{x}_i = \sum_{j \in A_i} \lambda_j (x_j W_v). \quad (8)$$

In this case, if $\mathrm{rank}(W_v) = d$, the space generated by $x$ is $\mathbb{R}^d$. We randomize 1000 different pairs of $W_q$ and $W_k$, 1000 different $\lambda$, and a fixed $W_v$. The outputs are shown in Fig. 8. As demonstrated in the figure, the outputs of convolution fill the space, while the outputs of SA are gathered together.
Fig. 8. The possible outputs (dots in red) of self-attention (a) and convolution (b) with a fixed $W_v$.
A recent work [26] also demonstrates that CNNs with super-large kernels learn feature representations from a super-large (sub-global) receptive field and achieve comparable performance to state-of-the-art transformers with fewer parameters.

As aforementioned, we need a module that gathers long-range textures. Since SA fails to learn long-range contrast, and a super-large convolution kernel leads to a large computational cost, we design a light yet effective convolution module for long-range contrast learning, namely the Long-range Contrast Learning Module (LCLM).
c) Long-range contrast learning with dilated convolution: To cover long-range contrast with fewer parameters, a simple way is to utilize dilated convolution. In this work, we adopt 3 cascaded dilated convolution layers with dilations {3, 5, 11}. Together with the basic convolution layer at the end of the MobileViT block, the LCLM densely covers up to 41 x 41 pixels. We show the receptive field in Fig. 9. Each convolution layer is followed by a normalization layer and an activation layer. A depth-wise LCLM only needs 3x3x3xd parameters, which is even smaller than a single standard convolution layer (3 x 3 x d x d, d >> 3).
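A minimal PyTorch sketch of the LCLM as described (depth-wise variant; the choice of BatchNorm and SiLU is our assumption, since the text only states "a normalization layer and an activation layer"):

    import torch.nn as nn

    class LCLM(nn.Sequential):
        # Three cascaded depth-wise 3x3 convolutions with dilations {3, 5, 11};
        # together with the 3x3 convolution closing the MobileViT block, the
        # stack reaches a dense 41x41 receptive field.
        def __init__(self, channels):
            layers = []
            for dilation in (3, 5, 11):
                layers += [
                    nn.Conv2d(channels, channels, kernel_size=3,
                              padding=dilation, dilation=dilation,
                              groups=channels, bias=False),  # depth-wise
                    nn.BatchNorm2d(channels),
                    nn.SiLU(),
                ]
            super().__init__(*layers)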
Fig. 9. The receptive field of the pixel in red; the pixels not painted in grey are covered by the receptive field.
IV. EXPERIMENTS

A. Realistic US Simulation from CT Volume

It is difficult to provide quantitative evaluation criteria for image generation-related work. Qualitatively, the advantages of this paper over deep learning-based US image generation are patient-specific synthesis and more accurate lesion boundaries; compared with previous image-based generation papers, our system produces more realistic images by simulating squeezing and scattering in soft tissues and bones. Nonetheless, quantitative evaluation is also of significance, and since the quality of image generation is hard to evaluate quantitatively, we evaluate it in terms of the intended purpose of the work. Apart from training doctors to perform US examinations, the most important uses of US image generation algorithms are intra-operative registration and providing datasets for deep learning. Therefore, we feed the generated images into various deep learning-based segmentation algorithms to verify the usefulness of this image generation system for pre-training neural networks.
B. Deep Learning-Based Segmentation

a) Implementation Details: To validate the proposed US simulation method and the lightweight segmentation model, we train the proposed model on synthetic US images and run inference on real US images. We set the batch size to 32, utilize the Adam optimizer, and set the learning rate to 1e-4.
Fig. 10. Validation image examples. (a) A US image of a spinal phantom in a gel mimic; (b) a US image of a 3D-printed spine in water; (c) a clinical US image of a human subject.
b) Comparison with other models: We first compare the proposed model with other US segmentation models in Tab. II. All performances reported in the table are re-implemented by ourselves. Compared with UNet-based models, the proposed model is much smaller and more effective. Compared with MViT-FPN, the proposed approach achieves much better performance.
In our use case, the segmentation serves registration; we therefore remove the cases with obvious segmentation errors to obtain our (selected) result, which is used in the subsequent registration. In this setting, the network trained with generated data performs on par with or better than the SOTA model trained on a large number of accurately labeled real US images. Our labels are acquired from CT segmentation, which is more challenging for the convergence of lightweight neural networks because it can label bone surfaces that are invisible or ambiguous in some US situations.
c) Ablation study: To demonstrate the effectiveness of synthetic US over raw CT scans, of long-range dependency, and of long-range contrast, we validate the aforementioned settings and show the results in Tab. III. Training the model with CT scans leads to a dramatic IoU decrease, which demonstrates that the proposed method can effectively synthesize US images. Without LCLM, the model fails to learn the long-range contrast of US images, resulting in a performance decrease. Without MViT, the IoU drops slightly, which indicates that the model benefits from long-range dependency, though less significantly than from long-range contrast.
TABLE II
Comparison with other segmentation models. Note that all performances reported in the table are re-implemented by ourselves.

Method          | Dice  | CD(TP)   | CD(FN)   | #Param | FPS
UNet [17]       | 0.574 | 1.085 mm | 0.590 mm | 131.8M | 4.13
MViT-FPN [25]   | 0.294 | 4.664 mm | 0.824 mm | 20.0M  | 28.1
CNL-UNet [19]   | 0.329 | 2.868 mm | 1.287 mm | 48.6M  | 10.9
Ours            | 0.783 | 0.599 mm | 1.079 mm | 20.0M  | 26.3
Ours (selected) | 0.926 | 0.227 mm | 0.184 mm | 20.0M  | 26.3
TABLE III
Ablation study of the proposed model. "SUS" denotes synthetic US; "SUS→CT", "w/o MViT", and "w/o LCLM" denote training the model on CT scans, removing MViT from the baseline, and removing LCLM from the baseline, respectively.

Metric | Baseline | SUS→CT   | w/o MViT | w/o LCLM
Dice   | 0.783    | 0.040    | 0.438    | 0.294
CD(TP) | 0.599 mm | 6.062 mm | 0.910 mm | 4.664 mm
CD(FN) | 1.079 mm | 3.938 mm | 0.674 mm | 0.824 mm
Fig. 11. Segmentation results. (a), (b), and (c) present the segmented label, the output of the network, and the ground truth, respectively.
C. Validation of US-CT Registration

a) Implementation Details: The US simulation system is proposed to obtain labeled data across multiple poses, patients, and environments, enabling US-based surgical navigation. In this paper, we validate whether the output of the model trained on synthetic data can align the preoperative CT and the planned trajectory to the intra-operative phase, using spinal bone-surface registration for pedicle screw placement as the instance.

The validation has two parts. The first validates the accuracy of the rigid-body registration, i.e., translation and rotation. The second validates whether the pre-operatively planned trajectory, transformed as above, would cause surgical complications, i.e., whether the intra-operative trajectory would touch vital organs. The experiments were performed on three phantoms: a spine phantom in water, a 3D-printed human spine in water, and a bovine spine in agar gel. The registration consists of a coarse alignment followed by an individual registration for each segment. The coarse registration uses all point clouds to estimate the approximate position and orientation, and the final registration uses a de-noised Iterative Closest Point (ICP) method. The registration results are shown below.
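The two-stage scheme can be sketched with Open3D as follows. This is a minimal illustration, not the authors' pipeline: statistical outlier removal stands in for the unspecified de-noising step, and the voxel size and distance thresholds are assumptions:

    import open3d as o3d

    def register(source, target, voxel=2.0, max_dist=3.0):
        # Simplified de-noising: drop statistical outliers from the US point cloud.
        source, _ = source.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)
        # Coarse alignment on downsampled clouds, then per-segment ICP refinement.
        src_down = source.voxel_down_sample(voxel)
        tgt_down = target.voxel_down_sample(voxel)
        coarse = o3d.pipelines.registration.registration_icp(
            src_down, tgt_down, max_correspondence_distance=10.0)
        fine = o3d.pipelines.registration.registration_icp(
            source, target, max_correspondence_distance=max_dist,
            init=coarse.transformation)
        return fine.transformation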
Fig. 12. The phantom models for registration validation.
Fig. 13. The MSE loss of registration in three dimensions.
Fig. 14. Translation and rotation of the pedicle screw under the registration matrix. The rotation is presented as an angle in degrees.
b) Validation of intra-operative model registration: In the registration phase, the model takes the US image as input, outputs the segmentation label, and extracts the upper contour of its largest connected component; the point cloud is then created using the calibration matrix of the US probe. The US calibration is done with a two-cross-wire phantom [27], with 0.97 mm mean squared error (MSE). Since the coordinate system of
+ The Lumbar segment9
783
+ each segment does not coincide, the rotation evaluation in
784
+ angular will be represented by the rotation of pedicle screws in
785
+ the next part. To evaluate the error of registration, the point-
786
+ to-point MSE is used, which is also the objective function
787
+ of the ICP algorithm. The result of registration is shown in
788
+ Fig. 15, which shows the ground truth in the intra-operative
789
+ space(white model) and the results of registration with US
790
+ (green model). In summary, the segmentation algorithm trained
791
+ by the generated US image works in the registration with the
792
+ error 0.133 ∼ 3.366mm. The maximum appears in the z-axis
793
+ of L5, which is around 3 mm, which is probably caused by
794
+ the shape of L5 being more different from the shape of other
795
+ lumbar.
796
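The point-to-point MSE used here is simply the mean squared nearest-neighbor distance between the registered cloud and the ground truth; a small SciPy sketch (an illustration, not the evaluation code used in the paper):

    import numpy as np
    from scipy.spatial import cKDTree

    def point_to_point_mse(registered, ground_truth):
        # For each registered point, find its nearest ground-truth point and
        # average the squared distances (the quantity ICP minimizes).
        dists, _ = cKDTree(ground_truth).query(registered)
        return float(np.mean(dists ** 2))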
c) Validation of Pedicle Screw Placement Feasibility: To verify the registration effectiveness of the system in the target task, we compared the translation and rotation of the screw tip between the US point-cloud registration and the ground truth, using an expert-planned intra-operative plan for pedicle screw placement. The rotation is the angle, in degrees, between the two pedicle axis vectors in 3D space. As shown in Fig. 14, the translation error of the screw tip ranges from 0.027 mm to 4.031 mm, with the maximum again occurring on the z-axis of L5, consistent with the above assessment. The rotation error is between 3.05 and 4.47 degrees; we therefore conclude that the model pre-trained on generated US images can perform the alignment with small errors. The results of the pedicle screw placement are shown in Fig. 16, which indicates that this registration does not put the patient at risk of complications.
Fig. 15. Visualization of the registration. The green part is the registration result from US segmentation, and the white part is the ground truth. Cases are derived from the 3D-printed patient spine, (b) in Fig. 12.
Fig. 16. Visualization of registration-based screw placement. The figure shows the posture of screws of standard length and diameter, planned on preoperative CT by an experienced surgeon, relative to the intra-operative spine ground truth under the registration matrix obtained from image segmentation. Cases are derived from the standard spine phantom, (c) in Fig. 12.
V. CONCLUSION

In this paper, we propose a US image simulation method based on CT images and acoustic properties that automatically generates a US simulation environment from a specific patient's CT volume, allowing US-based deep learning and reinforcement learning algorithms to be trained and validated. In addition, we propose a lightweight vision transformer for segmenting US images, a network structure with better segmentation accuracy and cross-modality generalization capability.

Experiments show that the model trained on US images generated by the realistic US-from-CT simulation system achieves higher segmentation accuracy than the model trained on CT images and can perform the intraoperative registration process without complications. Compared to other segmentation algorithms, the proposed transformer offers real-time computational efficiency, better segmentation accuracy, and generalization capability, which is particularly important in US surgical applications.
REFERENCES

[1] K. Wang, E. Teoh, J. Jaros, and B. E. Treeby, "Modelling nonlinear ultrasound propagation in absorbing media using the k-Wave toolbox: experimental validation," in 2012 IEEE International Ultrasonics Symposium. IEEE, 2012, pp. 523-526.
[2] G. Ning, X. Zhang, and H. Liao, "Autonomic robotic ultrasound imaging system based on reinforcement learning," IEEE Transactions on Biomedical Engineering, vol. 68, no. 9, pp. 2787-2797, 2021.
[3] A. Piórkowski and A. Kempny, "The transesophageal echocardiography simulator based on computed tomography images," IEEE Transactions on Biomedical Engineering, vol. 60, no. 2, pp. 292-299, 2012.
[4] J. A. Jensen, "Field: A program for simulating ultrasound systems," in 10th Nordic-Baltic Conference on Biomedical Imaging, vol. 4, supplement 1, part 1, pp. 351-353. Citeseer, 1996.
[5] B. T. Cox and P. C. Beard, "Fast calculation of pulsed photoacoustic fields in fluids using k-space methods," The Journal of the Acoustical Society of America, vol. 117, no. 6, pp. 3616-3627, 2005.
[6] B. E. Treeby, J. Jaros, A. P. Rendell, and B. Cox, "Modeling nonlinear ultrasound propagation in heterogeneous media with power law absorption using a k-space pseudospectral method," The Journal of the Acoustical Society of America, vol. 131, no. 6, pp. 4324-4336, 2012.
[7] B. Burger, C. Abkai, and J. Hesser, "Simulation of dynamic ultrasound based on CT models for medical education," Studies in Health Technology and Informatics, vol. 132, p. 56, 2008.
[8] W. Cong, J. Yang, Y. Liu, and Y. Wang, "Fast and automatic ultrasound simulation from CT images," Computational and Mathematical Methods in Medicine, vol. 2013, 2013.
[9] W. Wein, S. Brunke, A. Khamene, M. R. Callstrom, and N. Navab, "Automatic CT-ultrasound registration for diagnostic imaging and image-guided intervention," Medical Image Analysis, vol. 12, no. 5, pp. 577-585, 2008.
[10] O. Kutter, R. Shams, and N. Navab, "Visualization and GPU-accelerated simulation of medical ultrasound from CT images," Computer Methods and Programs in Biomedicine, vol. 94, no. 3, pp. 250-266, 2009.
[11] Q. Wang, B. Peng, Z. Cao, X. Huang, and J. Jiang, "A real-time ultrasound simulator using Monte-Carlo path tracing in conjunction with OptiX engine," in 2020 IEEE International Conference on Systems, Man, and Cybernetics (SMC). IEEE, 2020, pp. 3661-3666.
[12] J. A. Jensen, "Simulation of advanced ultrasound systems using Field II," in 2004 2nd IEEE International Symposium on Biomedical Imaging: Nano to Macro (IEEE Cat No. 04EX821). IEEE, 2004, pp. 636-639.
[13] Y. Hu, E. Gibson, L.-L. Lee, W. Xie, D. C. Barratt, T. Vercauteren, and J. A. Noble, "Freehand ultrasound image simulation with spatially-conditioned generative adversarial networks," in Molecular Imaging, Reconstruction and Analysis of Moving Body Organs, and Stroke Imaging and Treatment. Springer, 2017, pp. 105-115.
[14] F. Tom and D. Sheet, "Simulating patho-realistic ultrasound images using deep generative networks with adversarial learning," in 2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018). IEEE, 2018, pp. 1174-1177.
[15] B. Liu, H.-D. Cheng, J. Huang, J. Tian, X. Tang, and J. Liu, "Probability density difference-based active contour for ultrasound image segmentation," Pattern Recognition, vol. 43, no. 6, pp. 2028-2042, 2010.
[16] M. Talebi, A. Ayatollahi, and A. Kermani, "Medical ultrasound image segmentation using genetic active contour," Journal of Biomedical Science and Engineering, vol. 4, no. 2, p. 105, 2011.
[17] O. Ronneberger, P. Fischer, and T. Brox, "U-Net: Convolutional networks for biomedical image segmentation," in International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, 2015, pp. 234-241.
[18] G. Zhang, K. Zhao, Y. Hong, X. Qiu, K. Zhang, and B. Wei, "SHA-MTL: soft and hard attention multi-task learning for automated breast cancer ultrasound image segmentation and classification," International Journal of Computer Assisted Radiology and Surgery, vol. 16, no. 10, pp. 1719-1725, 2021.
[19] M. B. Shuvo, R. Ahommed, S. Reza, and M. Hashem, "CNL-UNet: A novel lightweight deep learning architecture for multimodal biomedical image segmentation with false output suppression," Biomedical Signal Processing and Control, vol. 70, p. 102959, 2021.
[20] H. Pan, Q. Zhou, and L. J. Latecki, "SGUNet: Semantic guided UNet for thyroid nodule segmentation," in 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI). IEEE, 2021, pp. 630-634.
[21] K. Raum, J. Reißhauer, and J. Brandt, "Frequency and resolution dependence of the anisotropic impedance estimation in cortical bone using time-resolved scanning acoustic microscopy," Journal of Biomedical Materials Research Part A, vol. 71, no. 3, pp. 430-438, 2004.
[22] M. Lyons and K. Parker, "Absorption and attenuation in soft tissues. II. Experimental results," IEEE Transactions on Ultrasonics, Ferroelectrics, and Frequency Control, vol. 35, no. 4, pp. 511-521, 1988.
[23] P. R. Hoskins, "Physical properties of tissues relevant to arterial ultrasound imaging and blood velocity measurement," Ultrasound in Medicine & Biology, vol. 33, no. 10, pp. 1527-1539, 2007.
[24] I. Hacihaliloglu, "Ultrasound imaging and segmentation of bone surfaces: A review," Technology, vol. 5, no. 02, pp. 74-80, 2017.
[25] S. Mehta and M. Rastegari, "MobileViT: light-weight, general-purpose, and mobile-friendly vision transformer," ICLR, 2022.
[26] X. Ding, X. Zhang, Y. Zhou, J. Han, G. Ding, and J. Sun, "Scaling up your kernels to 31x31: Revisiting large kernel design in CNNs," ICLR, 2022.
[27] G. Carbajal, A. Lasso, L. Gómez, and G. Fichtinger, "Improving N-wire phantom-based freehand ultrasound calibration," in Computer Assisted Radiology and Surgery, 2013.
89AzT4oBgHgl3EQf-_4J/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
99E0T4oBgHgl3EQfwwHR/content/tmp_files/2301.02638v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
99E0T4oBgHgl3EQfwwHR/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
ANE4T4oBgHgl3EQf4w6U/content/tmp_files/2301.05316v1.pdf.txt ADDED
@@ -0,0 +1,889 @@
Traffic Steering for 5G Multi-RAT Deployments using Deep Reinforcement Learning

Md Arafat Habib1, Hao Zhou1, Pedro Enrique Iturria-Rivera1, Medhat Elsayed2, Majid Bavand2, Raimundas Gaigalas2, Steve Furr2 and Melike Erol-Kantarci1, Senior Member, IEEE
1School of Electrical Engineering and Computer Science, University of Ottawa, Ottawa, Canada
2Ericsson Inc., Ottawa, Canada
Emails: {mhabi050, hzhou098, pitur008, melike.erolkantarci}@uottawa.ca, {medhat.elsayed, majid.bavand, raimundas.gaigalas, steve.furr}@ericsson.com

Abstract—In 5G non-standalone mode, traffic steering is a critical technique for taking full advantage of 5G New Radio while optimizing the dual connectivity of 5G and LTE networks in a multiple radio access technology (RAT) setting. An intelligent traffic steering mechanism can play an important role in maintaining a seamless user experience by dynamically choosing the appropriate RAT (5G or LTE) for a specific user traffic flow with certain QoS requirements. In this paper, we propose a novel traffic steering mechanism based on deep Q-learning that can automate traffic steering decisions in a dynamic environment with multiple RATs and maintain diverse QoS requirements for different traffic classes. The proposed method is compared with two baseline algorithms: a heuristic-based algorithm and Q-learning-based traffic steering. Compared to the Q-learning and heuristic baselines, our results show that the proposed algorithm achieves 6% and 10% higher average system throughput, and 23% and 33% lower network delay, respectively.

Index Terms—Multi-RAT, traffic steering, reinforcement learning
I. INTRODUCTION

The dual connectivity between Long Term Evolution (LTE) and fifth-generation New Radio (5G NR) results in multiple radio access technologies (multi-RAT) [1], [2]. Each type of RAT has distinctive capabilities for serving user equipment (UE) with diverse quality-of-service (QoS) requirements, which raises the need to steer a specific class of traffic to a certain RAT to fulfill the QoS demands. For instance, high-throughput video traffic can be better served by 5G NR. Conversely, steering voice traffic to an LTE base station (BS) with wider coverage can be the better decision, since such traffic is not throughput-hungry but requires more coverage to avoid frequent handovers. However, steering a specific class of traffic continuously to a certain RAT may cause problems: the system may suffer higher delay due to excessive load and reduced throughput due to packet drops. These issues are challenging to address, especially as 5G NR facilitates dense network deployments and an increased number of users.

To address the above challenges, an AI-enabled traffic steering scheme emerges as a promising approach to manage densely deployed networks with dynamic requirements. In recent years, AI and machine learning have been applied to various other problems in 5G [3]. Even though the emergence of the 5G non-standalone (NSA) mode has recently drawn researchers' attention, most existing works on traffic steering lack a comprehensive tool to overcome the complexity.

For instance, in [4], the authors propose a traffic steering scheme based on a threshold calculated from parameters such as the load at each RAT, channel condition, and service type, but the method lacks the intelligence to handle dynamic wireless environments. Compared with conventional model-based optimization methods, machine learning, especially reinforcement learning (RL), can significantly reduce the complexity of defining a dedicated optimization model [5]. Advanced techniques such as deep reinforcement learning (DRL) [6] can not only automate traffic steering in a dynamic 5G wireless environment but also handle a larger state-action space than traditional reinforcement learning. Therefore, unlike previous works, we propose a DRL-based traffic steering scheme that performs RAT-specific traffic steering in a multi-RAT environment to maintain the QoS requirements of different traffic classes in a dynamic 5G NSA mode, preserving seamless network activity and a smooth user experience.

In this paper, we seek to balance the QoS demands of all traffic classes simultaneously by proposing a deep Q-network (DQN)-based traffic steering scheme. The reward and state functions are carefully designed to achieve satisfactory performance on two crucial key performance indicators (KPIs): network delay and average system throughput. The performance of the proposed method is compared with two baseline algorithms: a Q-learning-based method [7] and a heuristic-based algorithm adopted from [4]. It gains 6% and 10% higher average system throughput than the Q-learning and heuristic baselines, respectively, and achieves 23% and 33% lower network delay than the same baselines.

The rest of the paper is organized as follows: Section II presents the related works. We discuss the system model and the problem formulation in Section III. Section IV covers the proposed DQN-based traffic steering scheme along with the baselines. The performance evaluation is presented in Section V. Finally, the paper is concluded in Section VI.
II. RELATED WORKS

In this section, we summarize the state-of-the-art literature on traffic steering. Prasad et al. propose a dynamic traffic steering scheme for energy-efficient radio access network moderation in ultra-dense 5G networks [8]. A unified traffic steering scheme by Dryjanski et al. targets optimal radio resource allocation in multi-RAT networks for LTE-Advanced Pro [9]. Most recently, Khaled et al. have proposed a cell-zooming technique to steer traffic in a software-defined radio-enabled LTE network that uses renewable energy sources to reduce on-grid power consumption [10]. Gijón et al. propose a data-driven approach for traffic steering in multi-carrier LTE networks, where steering is conducted based on reference-signal-received-quality-based handover margins [11].

Nevertheless, 5G deployments have made it more challenging to develop an elegant traffic steering scheme because of the increased number of users and dual connectivity. Passas et al. propose a pricing-oriented network selection process for distributed heterogeneous networks based on the load pressure imposed on a particular RAT [12]. A heuristic-based approach proposed in [4] performs traffic steering based on a threshold calculated from parameters such as channel condition, load level at each RAT, and service type. Priscoli et al. address the traffic steering problem with a Q-learning-based solution that aims at maintaining QoS and performs load balancing in a 5G heterogeneous network [13]. Different from these works, this paper provides automation via a DRL-based traffic steering scheme that performs RAT-specific steering in a multi-RAT environment. Furthermore, the proposed method maintains the QoS requirements of different traffic classes in a dynamic 5G NSA mode, preserving seamless network activity and a smooth user experience.
132
+ A. System Model
133
+ In this work, a multi-RAT network is considered having
134
+ Q classes of RATs where each class of RAT, q represents a
135
+ particular access technology (LTE, 5G, etc.). Multiple users are
136
+ associated with different types of RATs via dual connectivity.
137
+ A UE can maintain K types of traffic classes. Fig. 1 presents
138
+ the network model considered in this study. We represent
139
+ three different classes of traffics: voice, gaming, and video
140
+ as TC1, TC2, and TC3 respectively in the figure. We have
141
+ designed our network environment in a way where small
142
+ cells are within the range of a macro-cell. UEs have dual
143
+ connectivity with LTE or 5G RAT and traffic can be steered
144
+ to either one of these RATs based on our proposed method.
145
+ The total downlink bandwidth, B in MHz is divided into
146
+ NRB resource blocks. A resource block contains a set of
147
+ 12 contiguous subcarriers. Consecutive resource blocks are
148
+ grouped to constitute resource block group (RBG) as defined
149
+ in [3]. Each RBG, h is allocated a certain transmission power
150
+ ph,b, by a BS, b. Based on our system model, each BS holds a
151
+ number of transmission buffers corresponding to the number of
152
+ Fig. 1.
153
+ Illustration of network environment with one LTE macro cell and
154
+ several 5G small cells.
155
+ users connected to it. Every transmission time interval (TTI),
156
+ the downlink scheduler assigns resources to the users having
157
+ pending data transmissions.
158
+ The link capacity between the UE, u and BS, b can be
159
+ formulated as follows:
160
+ Cu,b =
161
+ H
162
+
163
+ h=1
164
+ ωh log2
165
+
166
+ 1 +
167
+ ph,bxh,u,bgh,u,b
168
+ ωhN0 + �
169
+ m∈B ph,mxh,u,mgh,u,m
170
+
171
+ ,
172
+ (1)
173
+ where ωh is the bandwidth of the h, ph,b is the transmit power
174
+ of the BS, b on h, gh,u,b is the channel co-efficient and xh,u,b
175
+ is the RBG’s allocation indicator of the link (h, u, b). N0 is
176
+ the additive white Gaussian noise single-sided power spectral
177
+ density. ph,m is the transmit power of the interfering BS, m,
178
+ gh,u,m is the channel co-efficient, and xh,u,m is the allocation
179
+ indicator of link (h, u, m).
180
+ Each link has a capacity limit. Traffic flows passing through
181
+ a link should not exceed the capacity of the link in the system.
182
+
183
+ f∈F
184
+ dfxf
185
+ u,b ⩽ Cu,b
186
+ ∀(u, b) ∈ L,
187
+ (2)
188
+ where F is the set of all the flows in the network, df is the
189
+ capacity demand of the flow f ∈ F from UE, u to BS b. xf
190
+ u,b
191
+ represents a binary (0, 1) component that is ‘1’ if the link
192
+ (u, b) has been used from UE,u to BS b. It is ‘0’ otherwise.
193
+ L is the set of links and Cu,b is the capacity of link (u, b). as
194
+ presented in eq. (1)
195
+ In our system model, the delay is considered as the summa-
196
+ tion of transmission and queuing delay which is as follows:
197
+ Dk,b = DT rx
198
+ k,b + Dq
199
+ k,b,
200
+ (3)
201
+ where DT rx
202
+ k,b
203
+ is the transmission delay experienced for a
204
+ particular traffic type k and BS b, and Dq
205
+ k,b is the queuing
206
+ delay experienced for a particular traffic type k at BS b for a
207
+ user u. The transmission delay can be calculated as follows:
208
+ DT rx
209
+ k,b = Lu,b
210
+ Cu,b
211
+ ,
212
+ (4)
213
+ where Lu,b is the packet length and Cu,b is the link capacity
214
+ as stated in eq. (1).
215
+
216
+ MBS
217
+ SBS“
218
+ UE -.- TC1
219
+ TC2
220
+ TC3B. QoS Requirements and Problem Formulation
221
+ To be able to perform traffic steering for different traffic
222
+ classes with QoS requirements for delay and throughput, first
223
+ two parameters are defined based on delay and throughput. The
224
+ delay parameter associated with our traffic steering problem
225
+ is considered as the ratio of the defined QoS requirement for
226
+ delay and the actual delay experienced in the system for a
227
+ particular traffic class being carried by a certain BS. It can be
228
+ stated as follows:
229
+ rD
230
+ k,b = DQoS
231
+ Dk,b
232
+ ,
233
+ (5)
234
+ where DQoS is delay requirement defined in the simulation for
235
+ a particular traffic type and Dk,b is the actual delay achieved.
236
+ Similarly, the throughput parameter is defined as the ratio
237
+ of actual throughput achieved and the required throughput as
238
+ stated in eq. (6):
239
+ rT
240
+ k,b = Tk,b
241
+ TQoS
242
+ ,
243
+ (6)
244
+ where TQoS is the throughput requirement defined in the
245
+ simulation for a particular traffic class and Tk,b is the actual
246
+ throughput achieved.
247
+ Since our aim is to improve the system performance in
248
+ terms of the delay and throughput, a new variable is formed
249
+ to represent and meet such targets. It combines the delay and
250
+ throughput parameters in eq. (5) and (6) along with some
251
+ weight factors. The declared variable combined with delay,
252
+ throughput, and weight factors (w1 and w2) is as follows:
253
+ M = w1(rD
254
+ k,b) + w2(rT
255
+ k,b).
256
+ (7)
257
+ The traffic steering problem proposed in this paper is formu-
258
+ lated as the maximization of the variable M (presented in eq.
259
+ (7)) which is as follows:
260
+ max
261
+
262
+ u∈U
263
+
264
+ k∈K
265
+
266
+ b∈B
267
+ Mu,f,b,
268
+ s.t.
269
+
270
+ (u,b)∈L
271
+ βfk ⩾ βf
272
+ ∀f ∈ F,
273
+
274
+ (u,b)∈L
275
+ D(u, b)xf
276
+ u,b ⩽ Df
277
+ ∀f ∈ F,
278
+ (8)
279
+ where βfk is the required bitrate for a particular type of traffic
280
+ k, and βf is the available bitrate. Also, Df represents the
281
+ latency demand of flow f ∈ F and D(u, b) is the latency of
282
+ link (u, b).
283
+ IV. PROPOSED DQN-BASED TRAFFIC STEERING SCHEME
284
+ A. DQN-based Traffic Steering Scheme
285
+ For a relatively simplistic RL environment, Q-learning is
286
+ a good solution for optimization. However, as the state-
287
+ space increases, the time needed to traverse all these states
288
+ and iteratively update all the Q-values will increase which
289
+ is computationally inefficient and resource consuming. To
290
+ address this issue, DQN can be used to estimate the Q-values
291
+ for each state-action pair in a given environment using a deep
292
+ neural network (DNN) [6].
293
+ During the training stage of DQN, agent’s experiences at
294
+ each time step is stored in a data set called the replay memory.
295
+ At time τ, the agent’s experience eτ is defined as the following
296
+ tuple:
297
+ eτ = (Sτ, Aτ, Rτ+1, Sτ+1).
298
+ (9)
299
+ The tuple contains the state of the environment, the action
300
+ taken from the state, the reward given to the agent as a
301
+ result of previous state-action pair and the next state of the
302
+ environment. In short, the tuple gives us the summary of the
303
+ agent’s experience at time τ. All the agent’s experiences at
304
+ each time step over all the episodes played by the agent are
305
+ stored in the replay memory. In practice, the replay memory
306
+ is set to some finite size unit (N). Therefore, it will only
307
+ store the last N experiences. The replay memory data set is
308
+ the place from where random samples are chosen to train the
309
+ network.
310
+ The DNN in DQN takes states as inputs from the envi-
311
+ ronment and outputs the Q-values for each action that can
312
+ be taken from that state. Before the training starts, first, the
313
+ replay memory data set, D is initialized to capacity, N. Next,
314
+ DNN is initialized with random weights. For each episode,
315
+ the starting state is initialized. For each time step within the
316
+ episode, the agent either explores the environment and selects
317
+ a random action or the agent exploits the environment and
318
+ selects the greedy action for the given state that provides the
319
+ highest Q-value. This epsilon greedy policy is used to balance
320
+ the exploration and exploitation.
321
+ Aτ =
322
+
323
+ random
324
+ action,
325
+ if rand ⩽ ϵ
326
+ argmax(qτ(Sτ, Aτ)),
327
+ otherwise
328
+ (10)
329
+ where ϵ is the exploration probability within 0 ⩽ ϵ ⩽ 1 and
330
+ rand represents a random number between 0 to 1.
331
+ After an action is taken, we observe the reward for the action
332
+ along with the next state of the environment. Therefore, the
333
+ state an agent initialized from, action taken, reward observed
334
+ are all put together in a tuple as described in eq. (9).
335
+ For a single sample, the first pass to the network occurs
336
+ for the state from the experience tuple that was sampled.
337
+ The network then outputs the Q-values associated with each
338
+ possible action that can be taken from that state and then the
339
+ loss is calculated between the Q-values for the action from
340
+ the experience tuple and the target Q-value for this action. To
341
+ calculate the target Q-value, it is required to have a second pass
342
+ to the target network with the next state. The target network
343
+ is the clone of the policy network (which is also the main
344
+ network). Its weights are frozen with the weights same as
345
+ the policy network and the weights are updated in the target
346
+ network after every certain amount of time steps. The loss for
347
+ DQN is calculated using the following equation:
348
+ L(w) = Er(Rτ + γ max
349
+ A q(Sτ+1, A, w′) − q(Sτ, Aτ, w)),
350
+ (11)
351
+ where w and w′ are the weights of the main and the target
352
+ network, and Er represents the error function. Having two
353
+ NNs (main and target) ensures stability.
354
+
355
+ Fig. 2 describes the schematic of the proposed DQN-based
356
+ traffic steering where we have a main network and a target
357
+ network and minibatch from the replay memory is getting
358
+ fetched.
359
+ Fig. 2. Overall system architecture with DQN.
360
+ The mathematical formulation of DQN depends on Markov
361
+ Decision Process (MDP) that is defined by agents, states,
362
+ actions, and a reward function. Tuples associated with DQN
363
+ is defined as follows:
364
+ • Agent: We implement a centralized agent to control the
365
+ macro base station (MBS) and the small cell base stations.
366
+ It is deployed in the MBS and controls all the incoming
367
+ traffic to each BS.
368
+ • State:
369
+ The
370
+ state
371
+ consists
372
+ of
373
+ three
374
+ elements,
375
+ {Tf, LQ(SINR), qL}. Here, Tf
376
+ represents the traffic
377
+ type. It is assumed that each traffic type has fixed
378
+ QoS
379
+ requirements
380
+ and
381
+ we
382
+ can
383
+ perform
384
+ traffic
385
+ steering to a particular RAT based on that. Users
386
+ periodically
387
+ report
388
+ signal-to-interference
389
+ and
390
+ noise
391
+ ratio (SINR) measurements to the 5G base station
392
+ (gNB) and LTE base station (eNB). It indicates the
393
+ quality of the link associated with a UE and a BS.
394
+ Therefore,
395
+ the
396
+ second
397
+ element
398
+ of
399
+ state
400
+ space
401
+ is:
402
+ LQ(SINR)={SINReNB, SINRgNB}. To represent load
403
+ level, queue length of both types of RATs is used. So,
404
+ the last element of the state space is queue length,
405
+ qL={qL(gNB), qL(eNB)}.
406
+ • Action: The action space contains the action of flow
407
+ admission to the RATs. It is defined as: {ALT E, A5G}.
408
+ Here, (ALT E) stands for flow admission to the LTE RAT
409
+ , and (A5G) stands for flow admission to the 5G RAT.
410
+ • Reward: The reward function is based on eq. (7). To
411
+ keep it normalized, sigmoid function is used. Therefore,
412
+ the reward function is as follows:
413
+ R = sigm(M),
414
+ (12)
415
+ where sigm(M) represents the sigmoid function.
416
+ The proposed DQN-based traffic steering algorithm is sum-
417
+ marized as Algorithm 1.
418
+ Algorithm 1 DQN-based traffic steering
419
+ Initialize: Network and DQN parameters
420
+ 1: for TTI = 1
421
+ to
422
+ T do
423
+ 2:
424
+ for every u, b, k do
425
+ 3:
426
+ if (rand ≤ ϵ) then
427
+ 4:
428
+ choose action randomly
429
+ 5:
430
+ else
431
+ 6:
432
+ select Aτ using greedy policy
433
+ 7:
434
+ end if
435
+ 8:
436
+ BSs are selected for all the UEs for all k ∈ K
437
+ 9:
438
+ Traffic admission is performed
439
+ 10:
440
+ Reward calculation based on eq. (12)
441
+ 11:
442
+ Agent updates its own state Sτ
443
+ 12:
444
+ Save (Sτ, Aτ, Rτ+1, Sτ+1)
445
+ 13:
446
+ end for
447
+ 14:
448
+ Random sample a minibatch from the experience pool
449
+ 15:
450
+ Generate target Q-values, qτ(Sτ, Aτ)
451
+ 16:
452
+ Update w using gradient descent to minimize the loss,
453
+ L(w) = Er(qτ(Sτ, Aτ) − q(Sτ, Aτ, w))
454
+ 17:
455
+ Copy w to w′ after several training
456
+ 18: end for
457
+ 19: Output: Optimal traffic steering decisions from TTI =
458
+ 1
459
+ to
460
+ T
461
+ B. Baseline Algorithms
462
+ In this section, two baseline algorithms are introduced that
463
+ have been used for the performance comparison. The first
464
+ baseline algorithm for RAT selection is based on a predefined
465
+ threshold [4]. This is called the heuristic baseline. Here, the
466
+ threshold is calculated for each UE based on the metrics like
467
+ load at eNB (le) and gNB (lg), channel condition of a user
468
+ under LTE (che,u) and 5G BS (chg,u), service type of a user
469
+ (Su). The channel condition is determined to be good or bad
470
+ considering a threshold of received SINR values. Similarly, the
471
+ load at each RAT is determined based on a threshold value.
472
+ Based on the mentioned metrics, a value Tu is calculated that
473
+ is used for selecting the RAT for a UE after comparing it with
474
+ a predetermined threshold (Tth). Following equation is used
475
+ to calculate the value for Tu:
476
+ Tu(le, lg, che,u, Su) = αle + βlg + γchg,u + δSu,
477
+ (13)
478
+ where α, β, γ, and δ are the weights associated with consid-
479
+ ered parameters that can be modulated based on the impact of
480
+ any certain metric on system performance. Tth is set to be the
481
+ mean of all the possible values of Tu. The decision of steering
482
+ traffic to a particular RAT is taken the following way:
483
+ Ru =
484
+
485
+ 1, Tu > Tth
486
+ (1 represents gNB)
487
+ 0, Tu ⩽ Tth
488
+ (0 represents eNB).
489
+ (14)
490
+ The Q-learning algorithm has been used as another baseline
491
+ in this work [7]. The goal is to investigate how DQN performs
492
+ against the Q-learning algorithm.
493
+
494
+ Main network
495
+ Hidden layers
496
+ Action
497
+ selector
498
+ LTE
499
+ ()
500
+ Hidden layers
501
+ lnput
502
+ 5G NR
503
+ Observation
504
+ Target network
505
+ Reward: Throughput,
506
+ delay
507
+ Minibatch from
508
+ New state: Load level
509
+ experience poolV. PERFORMANCE EVALUATION
510
+ A. Simulation setup
511
+ We have conducted MATLAB based simulations consider-
512
+ ing 1 eNB and 4 gNBs with 30 users in total. There are a total
513
+ of 1 macro-cell and 4 small cells facilitated by the gNBs and an
514
+ eNB. A macro-cell and a small-cell have carrier frequencies of
515
+ 3.5 GHz and 0.8 GHz respectively. Specifications of the traffic
516
+ classes used in this study have been summarized in TABLE I.
517
+ For the experimental results, the load has been varied between
518
+ 5-10 Mbps. Proportion of the voice, video, and gaming traffic
519
+ is 20%, 50%, and 30% respectively. Higher proportion of
520
+ the video traffic is deliberately considered to observe how
521
+ the system performs with the higher throughput requirements.
522
+ Also, gaming traffic has the most stringent delay requirement
523
+ and we wanted to see if the system performs well enough
524
+ to meet such precise requirement. Therefore it has a higher
525
+ percentage compared to the voice traffic. QoS requirements
526
+ associated with delay and throughput for the three types of
527
+ traffic classes are specified based on the existing literature [14]
528
+ and 3GPP specifications (see TABLE I.). We are using multi-
529
+ RAT dual connectivity architecture, an NSA mode where LTE
530
+ and 5G NR BSs serve together. An architecture specified in
531
+ [15] has been used where the dual connectivity is ensured
532
+ via evolved packet core [16]. Transmission power of the LTE
533
+ BS and 5G NR BSs are set to 40W and 20W. Furthermore,
534
+ bandwidth for the LTE and 5G RAT are fixed to 10MHz and
535
+ 20MHz.
536
+ TABLE I
537
+ TRAFFIC CLASS DESCRIPTION AND SIMULATION SETTINGS
538
+ Traffic class specification
539
+ Values
540
+ Traffic model
541
+ Poisson distribution, video
542
+ and gaming traffic [14]
543
+ Voice traffic
544
+ Packet size
545
+ 30 bytes
546
+ TQoS, DQoS
547
+ 0.1 Mbps , 100ms
548
+ Proportion of the traffic
549
+ 20%
550
+ Video traffic
551
+ Packet size
552
+ 250 bytes
553
+ TQoS, DQoS
554
+ 10 Mbps, 80ms
555
+ Proportion of the traffic
556
+ 50%
557
+ Gaming traffic
558
+ Packet size (gaming traffic)
559
+ 120 bytes
560
+ TQoS, DQoS
561
+ 5 Mbps, 40ms
562
+ Proportion of the traffic
563
+ 30%
564
+ B. Simulation results
565
+ The performance of the proposed algorithm is evaluated in
566
+ terms of two KPIs: Average system throughput and network
567
+ delay. In Fig 3, we present a comparison in terms of system
568
+ throughput under different user loads. The proposed DQN
569
+ outperforms heuristic and Q-Learning baselines by gaining 6%
570
+ and 10% increased throughput, respectively.
571
+ Fig. 4 presents the performance comparison of the proposed
572
+ DQN-based traffic steering method with the other baselines in
573
+ terms of delay. The DQN-based method achieves 23% and
574
+ Fig. 3. System throughput against traffic load.
575
+ Fig. 4. System delay against traffic load.
576
+ 33% decrease in network delay compared to the baselines.
577
+ Note that, the proposed method and the Q-learning, both have
578
+ a reward function formulated based on throughput and delay.
579
+ Whenever high delay is experienced for steering traffic to a
580
+ particular RAT, the system learns. That is why, both of them
581
+ have better performance compared to the heuristic baseline.
582
+ In Fig. 4, delay is calculated considering all the traffic classes
583
+ together at each load.
584
+ It should be mentioned that the main reason of the improved
585
+ performance of the proposed method is the use of DQN,
586
+ that outperforms Q-learning in terms of exploration efficiency
587
+ and achieves higher average reward. Q-learning suffers due to
588
+ longer exploration period and gets lower average reward since
589
+ it does not have a DNN as an approximator which compels
590
+ the agent to cover larger state and action space.
591
+ In this work, we also want to steer a particular type of
592
+ traffic to a specific RAT. For example, steering the voice
593
+ traffic constantly to a gNB is a waste of resources since the
594
+ throughput requirement is not that high for such traffic. Fig. 5
595
+ is presented which shows what percentage of a traffic class is
596
+ processed by a particular RAT and when the traffic gets steered
597
+ due to higher load. In Fig. 5(a), it is observed that most of
598
+ the voice traffic is processed by the eNB, however, a small
599
+ portion of the traffic is processed by the gNB too whenever
600
+ the system experiences higher load. For the video and gaming
601
+ traffic, it is observed that most of the traffic is processed by
602
+ the gNB.
603
+
604
+ 300
605
+ -DQN
606
+ --Q-learning
607
+ -Heuristic baseline
608
+ 250
609
+ 200
610
+ 150
611
+ 5
612
+ 6
613
+ 7
614
+ 8
615
+ 9
616
+ 10
617
+ Load per user (Mbps)20
618
+ -Heuristic baseline
619
+ -Q-learning
620
+ +DQN
621
+ 15
622
+ (ms)
623
+ Delay (
624
+ 10
625
+ 5
626
+ 0
627
+ 5
628
+ 6
629
+ 7
630
+ 8
631
+ 9
632
+ 10
633
+ Load per user (Mbps)Fig. 5. Data processing percentage for different traffic types.
634
+ Fig. 6. Traffic steered to other RAT as load changed.
635
+ Lastly, Fig. 6 demonstrates how traffic steering occurs
636
+ whenever a high load is experienced in a BS with a particular
637
+ RAT. We start with one UE at the 300th time slot and increase
638
+ the number of UEs in a small cell up to six for different traffic
639
+ classes. The variable L, in the respective figure represents load
640
+ in terms of queue length. At the 1800th time slot, it can be
641
+ seen that four among six UEs are steering different types of
642
+ traffic to the 5G NR BS. This results in higher load and we can
643
+ see that the third and fourth UEs are experiencing high load
644
+ (value of L changed from 0 to 1). So, in the next observed
645
+ time slot, these two UEs steer the traffic to the eNB. In the
646
+ 2100th time slot, we can see four UEs steering voice, video,
647
+ and gaming traffic to the only eNB in our system. This incurs
648
+ high load at eNB and in the next observed slot we can see
649
+ that the sixth UE has switched its traffic to the gNB.
650
+ VI. CONCLUSIONS
651
+ In this study, we have proposed a novel method that can
652
+ perform RAT specific and QoS aware traffic steering using
653
+ DQN. It gains 6% and 10% increase in average system
654
+ throughput compared to the Q-learning and heuristic-based
655
+ baseline respectively. Moreover, it achieves 23% and 33%
656
+ times decrease in network delay compared to the baselines.
657
+ Apart from the better performance in terms of the KPIs, the
658
+ proposed method can perform RAT specific traffic steering
659
+ ensuring efficient use of network resources. Lastly, the pro-
660
+ posed DQN-based traffic steering can successfully perform
661
+ load balancing in an optimal way as whenever high load is
662
+ induced to a particular RAT, traffic is steered to another RAT
663
+ dynamically.
664
+ ACKNOWLEDGEMENT
665
+ This work has been supported by MITACS and Ericsson
666
+ Canada, and NSERC Collaborative Research and Training
667
+ Experience Program (CREATE) under Grant 497981.
668
+ REFERENCES
669
+ [1] V. Ramaswamy, J. T. Correia, and D. Swain-Walsh, “Modeling and
670
+ Analysis of Multi-RAT Dual Connectivity Operations in 5G Networks,”
671
+ in 2019 IEEE 2nd 5G World Forum (5GWF), pp. 484–489.
672
+ [2] R. Pirmagomedov, D. Moltchanov, A. Samuylov, A. Orsino, J. Torsner,
673
+ S. Andreev, and Y. Koucheryavy, “Characterizing Throughput and
674
+ Convergence Time in Dynamic Multi-Connectivity 5G Deployments,”
675
+ Computer Communications, vol. 187, pp. 45–58, 2022.
676
+ [3] M. Elsayed and M. Erol-Kantarci, “Radio Resource and Beam Man-
677
+ agement in 5G mmWave Using Clustering and Deep Reinforcement
678
+ Learning,” in GLOBECOM 2020 - 2020 IEEE Global Communications
679
+ Conference, 2020, pp. 1–6.
680
+ [4] M. Khaturia, P. Jha, and A. Karandikar, “5G-Flow: A Unified Multi-RAT
681
+ RAN Architecture for Beyond 5G Networks,” Computer Networks, vol.
682
+ 198, p. 108412, 2021.
683
+ [5] H. Zhou and M. Erol-Kantarci, “RAN Resource Slicing in 5G Using
684
+ Multi-Agent Correlated Q-Learning,” in Proc. IEEE PIMRC, Sep. 2021,
685
+ pp. 1–6.
686
+ [6] V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G.
687
+ Bellemare, A. Graves, M. Riedmiller, A. K. Fidjeland, G. Ostrovski
688
+ et al., “Human-Level Control Through Deep Reinforcement Learning,”
689
+ nature, vol. 518, no. 7540, pp. 529–533, 2015.
690
+ [7] R. S. Sutton and A. G. Barto, Reinforcement Learning: An Introduction.
691
+ MIT press, 2018.
692
+ [8] A. Prasad, F. S. Moya, M. Ericson, R. Fantini, and O. Bulakci, “Enabling
693
+ RAN Moderation and Dynamic Traffic Steering in 5G,” in 2016 IEEE
694
+ 84th Vehicular Technology Conference (VTC-Fall), 2016, pp. 1–6.
695
+ [9] M. Dryjanski and M. Szydelko, “A Unified Traffic Steering Framework
696
+ for LTE Radio Access Network Coordination,” IEEE Communications
697
+ Magazine, vol. 54, no. 7, pp. 84–92, 2016.
698
+ [10] H. Khaled, I. Ahmad, D. Habibi, and Q. V. Phung, “A Green Traffic
699
+ Steering Solution for Next Generation Communication Networks,” IEEE
700
+ Transactions on Cognitive Communications and Networking, vol. 7,
701
+ no. 1, pp. 222–238, 2020.
702
+ [11] C. Gij´on, M. Toril, S. Luna-Ram´ırez, and M. L. Mar´ı-Altozano, “A Data-
703
+ Driven Traffic Steering Algorithm for Optimizing User Experience in
704
+ Multi-Tier LTE Networks,” IEEE Transactions on Vehicular Technology,
705
+ vol. 68, no. 10, pp. 9414–9424, 2019.
706
+ [12] V. Passas, V. Miliotis, N. Makris, and T. Korakis, “Pricing Based
707
+ Distributed Traffic Allocation for 5G Heterogeneous Networks,” IEEE
708
+ Transactions on Vehicular Technology, vol. 69, no. 10, pp. 12 111–
709
+ 12 123, 2020.
710
+ [13] F. D. Priscoli, A. Giuseppi, F. Liberati, and A. Pietrabissa, “Traffic Steer-
711
+ ing and Network Selection in 5G Networks Based on Reinforcement
712
+ Learning,” in 2020 European Control Conference (ECC), 2020, pp. 595–
713
+ 601.
714
+ [14] J. Navarro-Ortiz, P. Romero-Diaz, S. Sendra, P. Ameigeiras, J. J. Ramos-
715
+ Munoz, and J. M. Lopez-Soler, “A Survey on 5G Usage Scenarios and
716
+ Traffic Models,” IEEE Communications Surveys & Tutorials, vol. 22,
717
+ no. 2, pp. 905–929, 2020.
718
+ [15] P.
719
+ Frenger
720
+ and
721
+ R.
722
+ Tano.
723
+ (2019)
724
+ A
725
+ Technical
726
+ Look
727
+ at
728
+ 5G
729
+ Energy Consumption and Performance. [Online]. Available: https:
730
+ //www.ericsson.com/en/blog/2019/9/energy-consumption-5{G}-nr
731
+ [16] M. Agiwal, H. Kwon, S. Park, and H. Jin, “A Survey on 4G-5G Dual
732
+ Connectivity: Road to 5G Implementation,” IEEE Access, vol. 9, pp.
733
+ 16 193–16 210, 2021.
734
+
735
+ TCi(Voice traffic)
736
+ TC2(Gamingtraffic)
737
+ 100
738
+ 100
739
+ -eNB
740
+ Higher load atgNB
741
+ 80
742
+ +gNB
743
+ Mostofthetraffic
744
+ 80
745
+ +Packet drop rate
746
+ processedbyeNB
747
+ -gNB
748
+ 60
749
+ 60
750
+ +eNB
751
+ +Packet drop rate
752
+ 40
753
+ 40
754
+ SteeredtraffictoeNB
755
+ Smallpartofthetraffic
756
+ Data
757
+ 20
758
+ processed bygNB
759
+ 20
760
+ 0
761
+ 0
762
+ 6
763
+ 7
764
+ 8
765
+ 9
766
+ 10
767
+ 6
768
+ 7
769
+ 8
770
+ 9
771
+ 10
772
+ Load per user (Mbps)
773
+ Load peruser (Mbps)
774
+ (b)
775
+ (a)
776
+ TC3(Video traffic)
777
+ 100
778
+ 80
779
+ +gNB
780
+ +eNB
781
+ Mostofthetraffic
782
+ -Packet drop rate
783
+ 60
784
+ processed by gNB
785
+ 40
786
+ Smallpartofthetraffic
787
+ Data
788
+ 20
789
+ processedbyeNB
790
+ 0
791
+ 6
792
+ 8
793
+ 9
794
+ 10
795
+ Load per user (Mbps)
796
+ (c)Traffic steered
797
+ to gNB due to
798
+ eNB
799
+ eNB
800
+ L=Load ("o"
801
+ gNB
802
+ 6
803
+ high load
804
+ indicates "low"
805
+ L=0
806
+ L=1
807
+ L=0
808
+ 1 indicates “high")
809
+ gNB
810
+ gNB
811
+ gNB
812
+ gNB
813
+ 5
814
+ Traffic steered
815
+ △ eNB
816
+ to eNB due to
817
+ =O
818
+ L=0
819
+ L=0
820
+ L=0
821
+ Number of UEs
822
+ O gNB
823
+ high load
824
+ gNB
825
+ gNB
826
+ gNB
827
+ eNB
828
+ eNB
829
+ Tcl (Voice)
830
+ 4
831
+ Tc2 (Video)
832
+ TC3 (Gaming)
833
+ L=0
834
+ L=0
835
+ L=1
836
+ L=O
837
+ L=0
838
+ gNB
839
+ gNB
840
+ gNb
841
+ gNB
842
+ eNB
843
+ eNB
844
+ 3
845
+ L=0
846
+ L=0
847
+ L=0
848
+ L=1
849
+ L=0
850
+ L=0
851
+ gNB
852
+ gNB
853
+ gNB
854
+ gNB
855
+ gNB
856
+ gNB
857
+ gNB
858
+ 2
859
+ L=0
860
+ L=0
861
+ L=0
862
+ L=0
863
+ 0=1
864
+ L=0
865
+ L=0
866
+ eNB
867
+ eNB
868
+ eNB
869
+ eNB
870
+ eNB
871
+ eNB
872
+ eNB
873
+ gNB
874
+ 1
875
+ L=0
876
+ L=0
877
+ L=0
878
+ L=0
879
+ L=0
880
+ L=0
881
+ L=1
882
+ L=0
883
+ 0
884
+ 0
885
+ 300
886
+ 600
887
+ 900
888
+ 1200 1500 1800 2100 2400 ....
889
+ Time slots
ANE4T4oBgHgl3EQf4w6U/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
BtE0T4oBgHgl3EQfQAAH/content/tmp_files/2301.02185v1.pdf.txt ADDED
@@ -0,0 +1,1146 @@
Discovering Sound Free-choice Workflow Nets With Non-block Structures

Tsung-Hao Huang [0000-0002-3011-9999] and Wil M. P. van der Aalst [0000-0002-0955-6940]
Process and Data Science (PADS), RWTH Aachen University, Aachen, Germany
{tsunghao.huang, wvdaalst}@pads.rwth-aachen.de

Abstract. Process discovery aims to discover models that can explain the behaviors of event logs extracted from information systems. While various approaches have been proposed, only a few guarantee desirable properties such as soundness and free-choice. State-of-the-art approaches that exploit the representational bias of process trees to provide these guarantees are constrained to be block-structured. Such constructs limit the expressive power of the discovered models, i.e., only a subset of sound free-choice workflow nets can be discovered. To support a more flexible structural representation, we aim to discover process models that provide the same guarantees but also allow for non-block structures. Inspired by existing works that utilize synthesis rules from free-choice net theory, we propose an automatic approach that incrementally adds activities to an existing process model using predefined patterns. Playing by the rules ensures that the resulting models are always sound and free-choice. Furthermore, the discovered models are not restricted to block structures and are thus more flexible. The approach has been implemented in Python and tested on various real-life event logs. The experiments show that our approach can indeed discover models with competitive quality and more flexible structures compared to the existing approach.

Keywords: Process Discovery · Free-choice Net · Synthesis Rules.
+ 1
28
+ Introduction
29
+ Process discovery aims to construct process models that reflect the behaviors of
30
+ a given event log extracted from information systems [2]. As it is a non-trivial
31
+ problem, many challenges remain. In most cases, the one and only "best model"
32
+ does not exist as there are trade-offs among the four model quality metrics,
33
+ namely fitness, precision, generalization, and simplicity [2]. In addition to the
34
+ quality metrics, there exist properties that one would like to have for the discov-
35
+ ered models. One of the important properties is being a sound workflow net as
36
+ soundness ensures the absence of deadlocks, proper completion, etc. [1] and it is
37
+ a prerequisite for many crucial automated analyses such as conformance check-
38
+ ing. The other desirable structural property is being free-choice [3]. In free-choice
39
+ nets, choices and synchronizations are separated. This provides an easy conver-
40
+ sion between the discovered models and many process modeling languages such
41
+ arXiv:2301.02185v1 [cs.DB] 3 Jan 2023
42
+
43
+ 2
44
+ T. Huang and W. M. P. van der Aalst
45
+ as Business Process Modeling Notation (BPMN) since the equivalent constructs
46
+ (dedicated split and join connectors) are naturally embedded. Furthermore, free-
47
+ choice nets have been studied extensively and thus supported by an abundance
48
+ of theories [10], which provide efficient analysis techniques.
49
+ While various discovery algorithms have been proposed, only a handful of
50
+ them provide such guarantees. State-of-the-art discovery algorithms like the In-
51
+ ductive Miner (IM) [15] are able to discover sound free-choice workflow nets by
52
+ exploiting its representational bias. However, due to the same reason, the discov-
53
+ ered models are constrained to be block-structured. This limits the expressive
54
+ power of such models, i.e., only a subset of the sound free-choice workflow nets
55
+ can be discovered. As an example, Fig. 1a shows a sound free-choice workflow
56
+ net (with non-block structures) discovered by our approach1. The same language
57
+ can never be expressed by the model discovered by IM, as shown in Fig. 1b.
58
+ [Figure 1 (nets omitted; activities a–h):]
+ (a) A model discovered by our approach. The same language cannot be expressed by the models discovered using the Inductive Miner, which uses process trees internally.
+ (b) A model discovered by the IM using the log generated by the model in (a). The two branches before c need to be synchronized first before d can be executed.
82
+ Fig. 1: Examples showing the need for non-block process model discovery. Note
83
+ that the trace ⟨a, b, c, d, e, f, g, h⟩ that is possible in (a) cannot be replayed by (b).
84
+ In this paper, we aim to discover sound free-choice workflow nets with non-
85
+ block structures. Inspired by the interactive process discovery approach in [11,12],
86
+ we develop an automatic process discovery algorithm that incrementally adds
87
+ activities to an existing net using synthesis rules [10]. Since checking the feasibil-
88
+ ity for the application of the synthesis rules is computationally expensive, we use
89
+ log heuristics to locate the most possible position for the to-be-added activity
90
+ on the existing process model instead of evaluating all possible applications of
91
+ synthesis rules as in [11]. Additionally, we identify the need for an additional
92
+ rule and extend the set of patterns introduced in [12].
93
+ Playing by the rules ensures that the discovered process models by our ap-
94
+ proach are guaranteed to be sound free-choice workflow nets [10,11]. Moreover,
95
+ the discovered models are not constrained to block structures. Last but not least,
96
+ the level of replay fitness is guaranteed via a threshold set by the users. The
97
+ approach has been implemented in Python and evaluated using various public-
98
+ available real-life event logs. The evaluation shows that our approach is able to
99
+ discover non-block structured models with competitive qualities compared to the
100
+ state-of-the-art discovery algorithm.
101
+ 1 The proposed approach has dedicated silent transitions for start and end as defined
102
+ later in Def. 5. We dropped them here for ease of comparison.
103
+
104
+ Discovering Sound Free-choice Workflow Nets With Non-block Structures
105
+ 3
106
+ The remainder of the paper is organized as follows. We review the related
107
+ work in Sec. 2 and introduce necessary concepts in Sec. 3. Sec. 4 introduces the
108
+ approach. Sec. 5 presents the experiment and Sec. 6 concludes the paper.
109
+ 2
110
+ Related Work
111
+ An overview of process discovery is beyond the scope of this paper; we refer to
112
+ [7,14] for more details. In this section, we focus on process discovery techniques
113
+ that guarantee soundness (and free-choice) properties. Approaches like [6,8] can
114
+ discover non-block structured models but cannot guarantee both properties.
115
+ While Split Miner discovers models that are deadlock-free, they are not nec-
116
+ essarily sound [8].
117
+ The family of Inductive Miner (IM) algorithms [15] guarantees sound and
118
+ free-choice of the discovered models by exploiting the representational bias of
119
+ the process tree. By design, a process tree represents a sound workflow net. It is
120
+ a rooted tree where the leaf nodes are activities and the non-leaf nodes are the
121
+ operators. The hierarchical representation has a straightforward translation to
122
+ Petri net. However, the resulting models are limited to being block-structured as
123
+ a process tree can only represent process models that can be separated into parts
124
+ that have a single entry and exit [15]. Consequently, process trees can only rep-
125
+ resent a subset of sound workflow nets. The same arguments hold for approaches
126
+ that are based on process trees such as the Evolutionary Tree Miner (ETM) [9]
127
+ and the recently developed incremental process discovery approach [16].
128
+ Applying the synthesis rules [10], the interactive process discovery approaches
129
+ developed in [12,13,11] ensure soundness and free-choice properties. A semi-
130
+ automatic interactive tool, ProDiGy, is proposed in [12] to recommend the best
131
+ possible ways to add an activity to an existing model.
132
+ Our approach differs from [12,13,11] in several ways. First, we adopt an
133
+ automatic setting as the order of adding activities is predetermined and the best
134
+ modification to the existing net is selected based on the model quality. Second,
135
+ we use log heuristics to locate the most suitable position for adding the new
136
+ activity instead of evaluating all the possibilities of synthesis rules applications.
137
+ Moreover, we identify the need for a new rule as the desired models often cannot
138
+ be discovered without going back and forth by a combination of reduction and
139
+ synthesis rules [13]. Lastly, the set of patterns is extended and formally defined.
140
+ 3
141
+ Preliminaries
142
+ We denote the set of all sequences over some set A as A∗, the power set of
143
+ A as P(A), and the set of all multisets over A as B(A). For some multiset
144
+ b ∈ B(A), b(a) denotes the number of times a ∈ A appears in b. For a given
145
+ sequence σ = ⟨a1, a2, ..., an⟩ ∈ A∗, |σ| = n is the length of σ and dom(σ) =
146
+ {1, 2, ..., |σ|} is the domain of σ. ⟨⟩ is the empty sequence. σ(i) = ai denotes the
147
+ i-th element of σ. Given sequences σ1 and σ2, σ1 · σ2 denotes the concatenation
148
+ of the two. Let A be a set and X ⊆ A be a subset of A. For σ ∈ A∗ and a ∈ A,
149
+
150
+ 4
151
+ T. Huang and W. M. P. van der Aalst
152
+ we define ↾X∈ A∗→X∗ as a projection function recursively with ⟨⟩↾X = ⟨⟩,
153
+ (⟨a⟩ · σ)↾X = ⟨a⟩ · σ↾X if a ∈ X and (⟨a⟩ · σ)↾X = σ↾X if a /∈ X. For example,
154
+ ⟨x, y, x⟩↾{x,z} = ⟨x, x⟩. Projection can also be applied to multisets of sequences,
155
+ e.g., [⟨a, b, a⟩6, ⟨a, b, c⟩6, ⟨b, a, c⟩2]↾{b,c} = [⟨b⟩6, ⟨b, c⟩8].
156
+ Definition 1 (Trace, Log). A trace σ ∈ U∗A is a sequence of activities, where UA is the universe of activities. A log L ∈ B(U∗A) is a multiset of traces.
+ Definition 2 (Log Properties). Let L ∈ B(U∗A) and a, b ∈ UA.
+ – #(a, L) = Σσ∈L |{i ∈ dom(σ) | σ(i) = a}| is the number of times a occurs in L.
+ – #(a, b, L) = Σσ∈L |{i ∈ dom(σ)\{|σ|} | σ(i) = a ∧ σ(i+1) = b}| is the number of direct successions from a to b in L.
+ – caus(a, b, L) = (#(a, b, L) − #(b, a, L)) / (#(a, b, L) + #(b, a, L) + 1) if a ≠ b, and caus(a, b, L) = #(a, b, L) / (#(a, b, L) + 1) if a = b; it is the strength of the causal relation (a, b).
+ – Apre_c(a, L) = {apre ∈ UA | caus(apre, a, L) ≥ c} is the set of a's preceding activities, determined by threshold c.
+ – Afol_c(a, L) = {afol ∈ UA | caus(a, afol, L) ≥ c} is the set of a's following activities, determined by threshold c.
+ – As(L) = {σ(1) | σ ∈ L ∧ σ ≠ ⟨⟩} is the set of start activities in L.
+ – Ae(L) = {σ(|σ|) | σ ∈ L ∧ σ ≠ ⟨⟩} is the set of end activities in L.
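+ To make these log properties concrete, the following is a minimal Python sketch (our own illustration, not code from the paper; names such as direct_successions are ours) computing #(a, b, L) and caus(a, b, L) from a multiset of traces:
+ from collections import Counter
+ def direct_successions(log):
+     # log is a list of (trace, multiplicity) pairs; a trace is a tuple of activities
+     counts = Counter()
+     for trace, mult in log:
+         for a, b in zip(trace, trace[1:]):
+             counts[(a, b)] += mult  # number of direct successions from a to b
+     return counts
+ def caus(a, b, succ):
+     # strength of the causal relation (a, b) as in Definition 2
+     ab, ba = succ[(a, b)], succ[(b, a)]
+     return (ab - ba) / (ab + ba + 1) if a != b else ab / (ab + 1)
+ # e.g., for [<d,g,h>^76, <g,d,h>^24]: caus(d, g) = (76 − 24)/(76 + 24 + 1) ≈ 0.51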
184
+ Definition 3 (Petri Net, Labeled Petri Net). A Petri net N = (P, T, F) is
185
+ a tuple, where P is the set of places, T is the set of transitions, P ∩ T = ∅, and
186
+ F ⊆ (P × T) ∪ (T × P) is the set of arcs. A labeled Petri net N = (P, T, F, l) is
187
+ a Petri net (P, T, F) with a labeling function l ∈ T ↛ UA that maps a subset of
188
+ transitions to activities. A t ∈ T is called invisible if t is not in the domain of l.
189
+ For any x ∈ P ∪ T, •x = {y | (y, x) ∈ F} denotes the set of input nodes and x• = {y | (x, y) ∈ F} denotes the set of output nodes (a superscript N, as in N•x, indicates the net and is dropped if it is clear from the context). The notation can be generalized to sets: for any X ⊆ P ∪ T, •X = {y | ∃x∈X (y, x) ∈ F} and X• = {y | ∃x∈X (x, y) ∈ F}.
195
+ Definition 4 (Free-choice Net). Let N = (P, T, F) be a Petri net. N is a
196
+ free-choice net if for any t1, t2 ∈ T : •t1 = •t2 or •t1 ∩ •t2 = ∅.
197
+ Definition 5 (Workflow Net (WF-net) [1,11]). Let N = (P, T, F, l) be a
198
+ labeled Petri net. W = (P, T, F, l, ps, pe, ⊤, ⊥) is a WF-net iff (1) it has a dedi-
199
+ cated source place ps ∈ P: •ps = ∅ and a dedicated sink place pe ∈ P: pe• = ∅
200
+ (2) ⊤ ∈ T: •⊤ = {ps}∧ps• = {⊤} and ⊥ ∈ T: ⊥• = {pe}∧•pe = {⊥} (3) every
201
+ node x is on some path from ps to pe, i.e., ∀x∈P ∪T (ps, x) ∈ F ∗ ∧ (x, pe) ∈ F ∗,
202
+ where F ∗ is the reflexive transitive closure of F.
203
+ Definition 6 (Short-circuited WF-net [1]). Let W = (P, T, F, l, ps, pe, ⊤, ⊥)
204
+ be a WF-net. The short-circuited WF-net of W, denoted by SC(W), is con-
205
+ structed by SC(W)=(P, T ∪{t′}, F ∪{(⊥, t′), (t′, ⊤)}, l, ps, pe, ⊤, ⊥), where t′ /∈ T.
206
+
207
+ Discovering Sound Free-choice Workflow Nets With Non-block Structures
208
+ 5
209
+ Definition 7 (Paths, Elementary Paths). A path of a Petri net N = (P, T, F)
210
+ is a non-empty sequence of nodes ρ = ⟨x1, x2, ..., xn⟩ such that (xi, xi+1) ∈ F for
211
+ 1 ≤ i < n. ρ is an elementary path if xi ̸= xj for 1 ≤ i < j ≤ n.
212
+ Definition 8 (Incidence Matrix [10]). Let N = (P, T, F) be a Petri net. The
213
+ incidence matrix N : (P × T) → {−1, 0, 1} of N is defined as
214
+ N(p, t) = 0 if ((p, t) /∈ F ∧ (t, p) /∈ F) ∨ ((p, t) ∈ F ∧ (t, p) ∈ F);
+ N(p, t) = −1 if (p, t) ∈ F ∧ (t, p) /∈ F;
+ N(p, t) = 1 if (p, t) /∈ F ∧ (t, p) ∈ F.
226
+ For a Petri net N = (P, T, F) and its corresponding incidence matrix N, we use
227
+ N(p) to denote the row vector of the corresponding p ∈ P and N(t) to denote
228
+ the column vector of the corresponding t ∈ T.
229
+ Definition 9 (Linearly Dependent Nodes [10]). Let N = (P, T, F) be a
230
+ Petri net. Q is the set of rational numbers. A place p is linearly dependent if
231
+ there exists a row vector ⃗v : P → Q such that ⃗v(p) = 0 and ⃗v · N = N(p). A
232
+ transition t is linearly dependent if there exists a column vector ⃗v : T → Q such
233
+ that ⃗v(t) = 0 and ⃗v · N = N(t).
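+ Over the rationals, this linear dependence amounts to an exact rank test: a place p is linearly dependent iff its row N(p) lies in the span of the remaining rows (the condition ⃗v(p) = 0 excludes p's own row). A sketch with sympy (our own illustration; sympy.Matrix gives exact rational arithmetic):
+ from sympy import Matrix
+ def place_linearly_dependent(rows, p):
+     # rows: incidence matrix as a list of place rows; p: index of the candidate place
+     full = Matrix(rows)
+     others = Matrix([r for i, r in enumerate(rows) if i != p])
+     return others.rank() == full.rank()  # rank unchanged => N(p) is a combination of the other rows
+ # e.g., in Fig. 2d the row of p4 equals the sum of the rows of p2 and p3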
234
+ Definition 10 (Synthesis Rules [10,11]). Let W and W ′ be two free-choice
235
+ workflow nets, and let SC(W) = (P, T, F, l, ps, pe, ⊤, ⊥) and SC(W ′) = (P ′, T ′,
236
+ F ′, l′, ps, pe, ⊤, ⊥) be the corresponding short-circuited WF-nets:
237
+ – Linear Dependent Place Rule ψP : W ′ is derived from W using ψP , i.e.,
238
+ (W, W ′) ∈ ψP if (1) T ′ = T, P ′ = P ∪ {p} and p /∈ P is linear dependent in
239
+ SC(W ′), F ′ = F ∪ F̃ where F̃ ⊆ (({p} × T) ∪ (T × {p})) (2) Every siphon
240
+ in SC(W ′) contains ps.
241
+ – Linear Dependent Transition Rule ψT : W ′ is derived from W using ψT , i.e.,
242
+ (W, W ′) ∈ ψT if P ′ = P, T ′ = T ∪ {t} and t /∈ T is linear dependent in
243
+ SC(W ′) and F ′ = F ∪ F̃ where F̃ ⊆ ((P × {t}) ∪ ({t} × P)), and ∀t∈T ∩T ′ l(t) =
244
+ l′(t).
245
+ – Abstraction Rule ψA: (W, W ′) ∈ ψA if (1) there exists a set of transitions
246
+ R ⊆ T and a set of places S ⊆ P such that (R × S ⊆ F) ∧ (R × S ̸= ∅). (2)
247
+ SC(W ′) is constructed by adding an additional place p /∈ P and a transition
248
+ t /∈ T such that P ′ = P ∪ {p}, T ′ = T ∪ {t}, F ′ = (F\(R × S)) ∪ ((R × {p}) ∪
249
+ ({p} × {t}) ∪ ({t} × S)), and ∀t∈T ∩T ′l(t) = l′(t).
250
+ Applying the three synthesis rules (ψP , ψT , ψA) to derive W ′ from a sound
251
+ free-choice workflow net W ensures that W ′ is also sound [13,11]. Three proper-
252
+ ties need to be hold for a WF-net to be sound (1) safeness: places cannot hold
253
+ multiple tokens at the same time (2) option to complete: it is always possible to
254
+ reach the marking in which only the sink place is marked. (3) no dead transitions.
255
+ Next, we introduce the initial net [11] and show some examples of synthesis rules
256
+ applications.
257
+
258
+ 6
259
+ T. Huang and W. M. P. van der Aalst
260
+ [Figure 2 (nets omitted): four workflow nets (a)–(d) over places ps, p1–p4, pe and transitions ⊤, ⊥, t1, t2; see the caption below.]
300
+ Fig. 2: Examples of synthesis rules applications starting from (a) The initial net. (b)
301
+ Using ψA, p2 and t1 are added to the initial net with R = {⊤} and S = {p1}. (c) Using
302
+ ψA, p3 and t2 are added to previous net with R = {⊤} and S = {p2}. (d) p4 is added
303
+ using ψp as p4 is a linear combination of p3 and p2.
304
+ Definition 11 (Initial Net [13]). Let W = (P, T, F, l, ps, pe, ⊤, ⊥) be a free-
305
+ choice WF-net. W is an initial net if P = {ps, p1, pe}, T = {⊤, ⊥}, F =
306
+ {(ps, ⊤), (⊤, p1), (p1, ⊥), (⊥, pe)}.
307
+ The initial net is shown in Fig. 2a. Clearly, it is a sound free-choice workflow
308
+ net. Starting from the initial net, one can incrementally add additional nodes
309
+ according to the synthesis rules. Fig. 2 shows example applications of synthesis
310
+ rules starting from the initial net.
311
+ 4
312
+ Approach
313
+ With the necessary concepts introduced, we are now ready to introduce the ap-
314
+ proach. We start by showing the basic idea of the approach with the help of
315
+ Fig. 3 before diving into each step in detail. Internally, the approach incremen-
316
+ tally adds a new activity to an existing net. The figure shows a single iteration.
317
+ In each iteration, we have an existing model from the previous iteration and a
318
+ log projected on the already added activities so far and the to-be-added one.
319
+ We start by locating the most likely position to add the new activity deter-
320
+ mined by log heuristics. The result of this step is a subset of nodes of the existing
321
+ model. The set of nodes will then be used to prune the search space. Then, the
322
+ predefined patterns are applied to the existing net to get a set of candidate nets.
323
+ Lastly, we select the best net (next existing net) out of the candidates in terms of
324
+ fitness and precision. Note that the existing net in the first iteration is initiated
325
+ by the initial net (Def. 11). As a running example, consider the correspond-
326
+ ing log that is used to discover the Petri net in Fig.1a by our approach: Ls =
327
+ [⟨a, b, c, d, f, g, h⟩22, ⟨a, b, c, f, d, g, h⟩14, ⟨a, e, b, c, d, f, g, h⟩13, ⟨a, e, b, c, f, d, g, h⟩13,
328
+ ⟨a, e, b, c, f, g, d, h⟩10, ⟨a, b, c, f, g, d, h⟩10, ⟨a, b, e, c, d, f, g, h⟩6, ⟨a, b, e, c, f, g, d, h⟩3,
329
+ ⟨a, b, e, c, f, d, g, h⟩3, ⟨a, b, c, d, e, f, g, h⟩2, ⟨a, b, c, e, d, f, g, h⟩2, ⟨a, b, c, e, f, g, d, h⟩1,
330
+ ⟨a, b, c, e, f, d, g, h⟩1]. The instances provided in Fig. 3 shows the 3rd iteration for
331
+ the running example Ls. In the following subsections, we introduce the details
332
+ of each step.
333
+
334
+ Discovering Sound Free-choice Workflow Nets With Non-block Structures
335
+ 7
336
+ [Figure 3 (diagram omitted): one iteration — (1) prune the search space using log heuristics, given the existing net Wi = (Pi, Ti, Fi, li, ps, pe, ⊤, ⊥), the projected log Li = [⟨d, g, h⟩76, ⟨g, d, h⟩24], and the to-be-added activity γ(i), yielding Vi ⊆ Pi ∪ Ti; (2) add the new activity to the existing net with pre-defined patterns (skip, loop), yielding the set of candidate nets Ci; (3) select the best net for the next iteration, Wi+1 = (Pi+1, Ti+1, Fi+1, li+1, ps, pe, ⊤, ⊥).]
357
+ Fig. 3: An example of a single iteration of our approach.
358
+ 4.1
359
+ Ordering Strategies for Adding Activities
360
+ Before starting any iteration, we need to come up with an order for adding ac-
361
+ tivities based on a given log L. It is important as the quality of the discovered
362
+ models often depends on the order of adding activities [11]. Moreover, in combi-
363
+ nation with the search space pruning, it can influence the computation time for
364
+ each iteration significantly. In this paper, we introduce two ordering strategies.
365
+ The first one is relatively straightforward. The activities in L are simply ordered
366
+ by their frequency.
367
+ Definition 12 (Activities-Adding Order, Frequency-Based Ordering). Let L ∈ B(U∗A) and A = ⋃σ∈L{a ∈ σ}. γ ∈ A∗ is an activities-adding order for L if {a ∈ γ} = A and |γ| = |A|. The frequency-based ordering is orderfreq(L) = γ such that γ is an activities-adding order and ∀1≤i<j≤|γ| #(γ(i), L) ≥ #(γ(j), L).
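+ As a sketch (our own code, not the paper's implementation), the frequency-based order follows directly from the activity counts:
+ from collections import Counter
+ def order_freq(log):
+     # frequency-based activities-adding order (Def. 12); log is a list of (trace, multiplicity) pairs
+     freq = Counter()
+     for trace, mult in log:
+         for a in trace:
+             freq[a] += mult
+     return sorted(freq, key=freq.get, reverse=True)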
373
+ The second ordering strategy is similar to the Breadth-first Search (BFS) al-
374
+ gorithm. The advantage of this is that it also considers the closeness between
375
+ activities in the log, rather than just frequency. To explain this ordering strategy,
376
+ we first define a sub-function.
377
+ Definition 13 (Directly-Precedes Activities Sorting). Let L ∈ B(U∗A) and a ∈ UA. A = {b ∈ UA | #(b, a, L) > 0} is the set of activities that directly precede a in L at least once, and σ ∈ A∗. Directly-precedes activities sorting is sortPreceded(a, L) = σ such that {b ∈ σ} = A, |σ| = |A|, and ∀1≤i<j≤|σ| #(σ(i), a, L) ≥ #(σ(j), a, L).
382
+ The function sortPreceded takes an activity a and a log L to return a sequence of
383
+ a’s directly-preceded activities b that are sorted by the frequency of #(b, a, L).
384
+ Finally, we can define the BFS-based ordering strategy.
385
+ Definition 14 (Breadth-First-Search-Based Ordering). Let L ∈ B(U∗A) and A = ⋃σ∈L{a ∈ σ}. The BFS-based ordering is orderBFS(L) = γ, where γ is an activities-adding order for L and γ = γ1 · γ2 · ... · γ|γ| with, for each 1 ≤ j ≤ |γ|:
+ γj = orderfreq(L↾Ae(L)) if j = 1, and γj = sortPreceded(γ(j − 1), L)↾A\{γ(1),γ(2),...,γ(j−1)} otherwise.
397
+ d
398
+ h
399
+ gdps
400
+ 4
401
+ 0
402
+ d
403
+ h
404
+ gd
405
+ hh
406
+ g8
407
+ T. Huang and W. M. P. van der Aalst
408
+ The function starts by sorting the end activities Ae(L) according to their fre-
409
+ quency in the log. Then, it enumerates through the sequence γ and sorts the
410
+ preceded activities of γ(j − 1) by the frequency of direct successions. The pro-
411
+ jection function in the second case of Def.14 filters out the activities that are
412
+ already in γ.
413
+ Compared to the frequency-based ordering, the BFS-based ordering considers
414
+ the closeness of the activities. This allows us to add activities that are close
415
+ together. Together with the effect of the search space pruning, it is expected that
416
+ BFS-based ordering would have less computation time. Applying the function
417
+ orderBFS to our running example, Ls, we get the activities adding order as
418
+ γ = orderBFS(Ls) = ⟨h⟩ · ⟨g, d⟩ · ⟨f⟩ · ⟨c, e⟩ · ⟨⟩ · ⟨b⟩ · ⟨a⟩ · ⟨⟩ = ⟨h, g, d, f, c, e, b, a⟩.
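+ A sketch of the BFS-based order, reusing the helpers direct_successions and order_freq from the sketches above (our own illustration, not the paper's code):
+ def order_bfs(log):
+     # BFS-based activities-adding order (Def. 14)
+     succ = direct_successions(log)
+     acts = {a for trace, _ in log for a in trace}
+     gamma = order_freq([((t[-1],), m) for t, m in log if t])  # end activities by frequency
+     j = 0
+     while j < len(gamma):
+         prev = gamma[j]
+         todo = [b for b in acts if succ[(b, prev)] > 0 and b not in gamma]
+         todo.sort(key=lambda b: succ[(b, prev)], reverse=True)
+         gamma.extend(todo)  # sortPreceded(gamma(j), L) restricted to not-yet-added activities
+         j += 1
+     return gamma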
419
+ γ is then used to determine the order of adding activities. Given the activities
420
+ adding order γ, we define the artifacts for each iteration i as follows.
421
+ Definition 15 (Projected Log). Let L ∈ B(U∗A) and γ be an activities-adding order for L. The projected log for L in the i-th iteration is Li = L↾{γ(1),γ(2),...,γ(i)}.
+ For instance, the projected log of the running example Ls for the 3rd iteration is L3s = Ls↾{h,g,d} = [⟨d, g, h⟩76, ⟨g, d, h⟩24], as shown in Fig. 3. The to-be-
427
+ added activity is denoted as γ(i), which is γ(3) = d for the 3rd iteration. Also,
428
+ we denote the existing sound free-choice workflow net for iteration i as W i. Note
429
+ that for the running example, W 1, W 2, and W 3 are visualized in Fig. 2a, 2b,
430
+ and 2c, respectively.
431
+ 4.2
432
+ Search Space Pruning
433
+ As checking the feasibility of applying linear dependent rules ψT , ψP is compu-
434
+ tationally expensive [11], it is impractical to compute all possible applications
435
+ of the synthesis rules. Also, some of them are not of interest. For example, as
436
+ shown in Fig. 3, it is clear that the to-be-added activity d never happens after
437
+ h in the projected log. Using such information, we can already eliminate the
438
+ constructs (applications of synthesis rules) that allow activity d to be executed
439
+ after h. Therefore, in each iteration i we start by locating the most likely posi-
440
+ tion to add γ(i). This helps us to restrict the application of synthesis rules on
441
+ only a subset of nodes, denoted as V i ⊆ P i ∪ T i, in the existing net W i. To do
442
+ that, we first identify the set of preceding and following activities of γ(i) in the
443
+ projected log Li, which would be Apre_c(γ(i), Li) and Afol_c(γ(i), Li), respectively.
448
+ Recall that c is a threshold for the causal relation and can be given by users
449
+ as an input. We use c = 0.9 as the default value. Then, the corresponding la-
450
+ belled transitions are identified in W i. Finally, V i is the set of all the nodes on
451
+ the elementary paths from the preceding transitions to the following transitions.
452
+ If Apre_c(γ(i), L) / Afol_c(γ(i), L) is an empty set, we use the ⊤ / ⊥ transitions. For
+ instance, in Fig. 3, we identify that Apre_c(d, L3s) = ∅ and Afol_c(d, L3s) = {h}. There-
464
+ fore, we find all the nodes on the elementary paths between every node in {⊤}
465
+ and every node in {t1}. As a result, the set V 3 = {⊤, p3, t2, p2, t1} is used to
466
+ prune the search space, i.e., to constrain the application of synthesis rules.
467
+
468
+ Discovering Sound Free-choice Workflow Nets With Non-block Structures
469
+ 9
470
+ Constraining synthesis rules For the abstraction rule ψA, this means that the
471
+ set of transitions R and the set of places S used as the preconditions for applying
472
+ ψA need to be a subset of V , i.e., S ⊆ V ∧R ⊆ V . For the linear dependent rules
473
+ ψP /ψT , the new place/transition (p′/t′) cannot have arcs connected to any node
474
+ outside V . This shortens the computation time as certain rules applications can
475
+ be removed and there is no need to check their feasibility.
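+ Viewing the net as a directed graph, the restriction set V can be collected with a standard simple-path enumeration; a sketch using networkx (our own illustration, assuming networkx's all_simple_paths):
+ import networkx as nx
+ def restrict_nodes(graph, preceding, following):
+     # all nodes on some elementary (simple) path from a preceding to a following transition
+     nodes = set()
+     for src in preceding:
+         for dst in following:
+             for path in nx.all_simple_paths(graph, src, dst):
+                 nodes.update(path)
+     return nodes
+ # e.g., on the net of Fig. 2c with preceding {⊤} and following {t1},
+ # the result is V3 = {⊤, p3, t2, p2, t1}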
476
+ 4.3
477
+ Patterns
478
+ In this section, we introduce the patterns that are used to add activity γ(i) to
479
+ the existing free-choice workflow net W i. First, we motivate the need for an
480
+ additional rule.
481
+ The need for an additional rule It is proven that any sound free-choice
482
+ workflow net can be constructed by the three synthesis rules ψA, ψP , ψT [10,11].
483
+ However, when applying to discover process models, the desirable model is often
484
+ not possible to derive due to the existing construct. An example is shown in
485
+ Fig. 4. While it is possible to add a transition labeled by a in Fig. 4a, it is not
486
+ possible to derive the same net in Fig. 4b as there is no rule allowing such trans-
487
+ formation. One possible workaround is to go back and forth by a combination
488
+ of reduction and synthesis rules as suggested in [13]. However, once the existing
489
+ net becomes more complex, such a solution becomes infeasible to track.
490
+ [Figure 4 (nets omitted): (a) a net for L↾⟨c,b⟩ = [⟨c⟩3, ⟨b⟩3] extended to a net for L↾⟨a,b,c⟩ = [⟨c,a⟩3, ⟨b⟩3]; (b) a net for L↾⟨a,b⟩ = [⟨a⟩3, ⟨b⟩3] for which no existing rule yields a net for L↾⟨a,b,c⟩ = [⟨c,a⟩3, ⟨b⟩3].]
507
+ Fig. 4: Examples showing the motivation for the dual abstraction rule ψD. Although
508
+ the desirable nets on the right-hand side of (a) and (b) are semantically the same, the
509
+ existing synthesis rules only allow the transformation in (a). There is no rule defined
510
+ for the transformation in (b).
511
+ We observe that in many situations (including the example in Fig. 4b), the
512
+ desired models cannot be constructed because there is no rule allowing one to
513
+ introduce a new transition t and a new place p in between a set of places S and
514
+ a set of transitions R that are fully connected, i.e., S × R ⊆ F. Therefore, we
515
+ define the dual abstraction rule to allow such construct.
516
+ Definition 16 (Dual Abstraction Rule ψD). Let W = (P, T, F, l, ps, pe, ⊤, ⊥)
517
+ and W ′ = (P ′, T ′, F ′, l′, ps, pe, ⊤, ⊥) be two free-choice workflow nets. (W, W ′) ∈
518
+ ψD if (1) there exists a set of places S ⊆ P and a set of transitions R ⊆ T such
519
+ that S × R ⊆ F ∧ S × R ̸= ∅. (2) W ′ is constructed by adding an additional
520
+
521
+ 10
522
+ T. Huang and W. M. P. van der Aalst
523
+ transition t /∈ T and a place p /∈ P such that P ′ = P ∪ {p}, T ′ = T ∪ {t}, F ′ =
524
+ (F \ (S × R)) ∪ ((S × {t}) ∪ ({t} × {p}) ∪ ({p} × R)), and ∀t∈T ∩T ′l(t) = l′(t).
525
+ As we are only interested in sound free-choice workflow nets, we need to make
526
+ sure that the dual abstraction rule ψD preserves soundness.
527
+ Proposition 1 (ψD preserves soundness). Let W = (P, T, F, l, ps, pe, ⊤, ⊥),
528
+ W ′ = (P ′, T ′, F ′, l′, ps, pe, ⊤, ⊥) be free-choice workflow nets, and W ′ is derived
529
+ from W using ψD, i.e., (W, W ′) ∈ ψD. Then W ′ is sound if W is sound.
530
+ Proof. Let t′ ∈ T ′\T and p′ ∈ P ′\P be the new transition and place in W ′. Let
531
+ R = p′• and S = •t′. The new net W ′ is free-choice in only two cases. Either
532
+ S = •R or R = S• (pre- and postsets taken in W). In either case, any reachable marking in (W, [ps]) that
535
+ does not need to fire tR ∈ R is still reachable in (W ′, [ps]). Also, the reachable
536
+ markings in (W, [ps]) that need to fire tR ∈ R can be reached in (W ′, [ps]) as one
537
+ can just add t′ somewhere before tR in the corresponding firing sequence. Then,
538
+ it is trivial to see that W ′ fulfils the three conditions of soundness if W is also
539
+ sound.
540
+
541
+ Next, we extent the linear dependent place rule ψP . As we aim to add a transition
542
+ labeled by γ(i) to the existing labeled free-choice workflow net W i, only adding
543
+ a place p′ by ψP does not suffice. Hence, in our approach, an application of ψP
544
+ is always coupled with a directly followed application of abstraction rule ψA to
545
+ include a transition. ψA is applied between the added place p′ and its preset •p′.
546
+ This is possible because every transition in •p′ is connected to every place in
547
+ {p′} by definition, which satisfies the precondition of ψA. An example is shown
548
+ in Fig. 5, p5 and t3 are added by ψA directly after the addition of p4 by ψP . To
549
+ be more precise, we define the extended rule ψ′P, which describes the pattern.
+ Definition 17 (Extended Linear Dependent Place Rule ψ′P). Let W = (P, T, F, l, ps, pe, ⊤, ⊥) and W′′ = (P′′, T′′, F′′, l′′, ps, pe, ⊤, ⊥) be free-choice workflow nets. (W, W′′) ∈ ψ′P if (1) ∃W′ = (P′, T′, F′, l′, ps, pe, ⊤, ⊥) such that (W, W′) ∈ ψP ∧ (W′, W′′) ∈ ψA, and (2) ∃! p∗ ∈ P′′ such that ({p∗} = P′\P) ∧ (((T′′\T′) × {p∗}) ⊂ F′′) ∧ ((T′ × {p∗}) ̸⊂ F′′).
557
+ [Figure 5 (nets omitted): panel (a) shows the net after ψP adds p4; panel (b) shows the net after ψA adds p5 and t3 (labeled d); see the caption below.]
586
+ Fig. 5: (a) ψP adds a place p4. (b) As every transition in •p4 has an arc to every place
587
+ in {p4}, one can directly apply ψA to add p5 and t3.
588
+ Then, we define the set of nets constructed by every possible single application
589
+ of the rules ψA, ψ′P, ψT , ψD.
591
+
592
+ Discovering Sound Free-choice Workflow Nets With Non-block Structures
593
+ 11
594
+ Definition 18 (Base Candidates Set). Let W=(P, T, F, l, ps, pe, ⊤, ⊥), W ′ =
595
+ (P ′, T ′, F ′, l′, ps, pe, ⊤, ⊥) be free-choice workflow nets. Let X=(P ′∪T ′)\(P∪T),
596
+ V ⊆P∪T, V ′=(P∪T)\V , and let a ∈ UA be an activity label. The base candidates
597
+ set is base(W, V, a) = {W′ | ((W, W′) ∈ (ψA ∪ ψT ∪ ψ′P ∪ ψD)) ∧ (∄x∈X (({x} × V′) ∪ (V′ × {x})) ⊆ F′) ∧ (l′ = l ∪ ((T′ \ T) × {a}))}.
600
+ The base candidates set Ci_base = base(Wi, Vi, γ(i)) consists of the nets that are
+ constructed by every possible single application of the rules ψA, ψ′P, ψT , ψD to
+ add a transition labeled by γ(i) to Wi, considering the constraints on Vi.
605
+ Next, we introduce three patterns that make a transition skippable, in a strict
606
+ loop, or in an optional (tau) loop. A transition in a strict loop means that the
607
+ execution of the transition is required, otherwise it is an optional loop.
608
+ Definition 19 (Pattern-Building Functions). Let W=(P, T, F, l, ps, pe, ⊤, ⊥)
609
+ and W ′ = (P ′, T ′, F ′, l′, ps, pe, ⊤, ⊥) be two free-choice workflow nets. Let a ∈ UA
610
+ be an activity label and ta ∈ T : l(ta) = a be the corresponding transition in W.
611
+ We define the three pattern-building functions2 as
612
+ – skip(W, a) = W ′ such that
613
+ – (W, W ′) ∈ ψT
614
+ – F ′ = F ∪ ({t′} × ta•) ∪ (•ta × {t′}) (where t′ ∈ T ′\T)
615
+ – l′ = l (t′ is a silent transition)
616
+ – loops(W, a) is defined by two cases:
617
+ 1. if ∄t∗∈((ta•)•)(|•t∗| > 1)∧(•t∗\ta• ̸= ∅), then loops(W, a) = W ′ such that
618
+ – (W, W ′) ∈ ψT
619
+ – F ′ = F ∪ (ta• × {t′}) ∪ ({t′} × •ta) (where t′ ∈ T ′\T)
620
+ – l′ = l (t′ is a silent transition)
621
+ 2. otherwise, return loops(W ′, a) such that
622
+ – (W, W ′) ∈ ψA
623
+ – (({ta} × (P ′\P)) ∈ F ′) ∧ (({ta} × P) /∈ F ′)
624
+ – l′ = l
625
+ – loopτ(W, a) is defined by two cases:
626
+ 1. if ∄t∗∈((ta•)•)(|•t∗| > 1) ∧ (•t∗\ta• ̸= ∅), then loopτ(W, a)=W ′ such that
627
+ – (W, W ′) ∈ ψT
628
+ – F ′ = F ∪ (ta• × {t′}) ∪ ({t′} × •ta) (where t′ ∈ T ′\T)
629
+ – l′ = (l\{(ta, a)}) ∪ {(t′, a)} (the labels of ta and t′ are swapped)
630
+ 2. otherwise, return loopτ(W ′, a) such that
631
+ – (W, W ′) ∈ ψA
632
+ – (({ta} × (P ′\P)) ∈ F ′) ∧ (({ta} × P) /∈ F ′)
633
+ – l′ = l
634
+ The second case of the loop functions is there to keep the free-choice property.
635
+ To illustrate the ideas using the running example, consider the net shown in
636
+ Fig. 6a as the input net W and t3 (labeled by d) is the transition for which we
637
+ are going to apply the functions to derive patterns. Fig. 6b shows that function
638
+ 2 The input/output nodes notations (•) used in Def. 19 refer to the input net W. We
639
+ drop the superscript for readability.
640
+
641
+ 12
642
+ T. Huang and W. M. P. van der Aalst
643
+ [Figure 6 (nets omitted): (a) the net W; t3 (labeled by d) is the target transition. (b) skip(W, d) adds a silent transition t4 that makes t3 skippable. (c) an intermediate net W′ (between (a) and (d)) constructed to keep the free-choice property. (d) the resulting net of loops(W, d), which makes t3 in a loop.]
726
+ Fig. 6: Examples showing how the functions are applied to derive patterns.
727
+ skip(W, d) simply adds a silent transition t4 with the same connection as t3 to
728
+ W. Fig. 6c and 6d show an application of loops(W, d) and illustrate the need
729
+ for the two cases for the loop functions. As shown in Fig. 6c, the second case
730
+ of loops is applied since there exists a transition t1 ∈ ((t3•)•) with more than
731
+ one place in its preset (| • t1| > 1) and •t1\t3• ̸= ∅. Therefore, W ′ (Fig. 6c) is
732
+ first constructed by adding p6 and t4. Then, the function returns loops(W ′, d).
733
+ Now, the first case should be applied. In this case, t5 is added with the reverse
734
+ connections of t3. As indicated, the second case in the loop functions helps to
735
+ keep the free-choice property. Imagine a net that is constructed by adding t′ to
736
+ the net in Fig. 6a with connections (p4, t′) and (t′, p5). Such a net makes t3 in a
737
+ loop but it is no longer a free-choice net. The constructs of loops and loopτ are
738
+ almost the same, the difference is that the labels of t3 and the silent transition
739
+ t5 are swapped.
740
+ Finally, to get the set of candidate nets Ci, we apply the three pattern-
741
+ building functions to every net W ∈ Ci_base. Observe that all the nets in Fig. 6 are elements of C3.
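+ For illustration, the skip pattern on a pm4py Petri net could look as follows (our own sketch; pm4py's object API — PetriNet.Transition, in_arcs/out_arcs, and petri_utils.add_arc_from_to — is assumed here):
+ from pm4py.objects.petri_net.obj import PetriNet
+ from pm4py.objects.petri_net.utils import petri_utils
+ def add_skip(net, t_a):
+     # skip(W, a): add a silent transition with the same pre- and postset as t_a
+     t_skip = PetriNet.Transition("skip_" + t_a.name, None)  # label None => silent
+     net.transitions.add(t_skip)
+     for arc in t_a.in_arcs:
+         petri_utils.add_arc_from_to(arc.source, t_skip, net)
+     for arc in t_a.out_arcs:
+         petri_utils.add_arc_from_to(t_skip, arc.target, net)
+     return net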
744
+ 4.4
745
+ Selection and Fall-through
746
+ Selection In the last step, we select the next existing net W i+1 from the set
747
+ of candidates Ci evaluated by the projected log Li. The selection is done in a
748
+ stepwise manner. We first try to filter out the candidates that do not reach a
749
+ user-defined replay fitness threshold θ and then select the best net out of the
750
+ rest in terms of F1 score, which is calculated as the harmonic mean of fitness
751
+ and precision. We use alignment-based fitness [4] and precision [5].
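+ A sketch of this selection step (our own code; pm4py's conformance API and the dictionary key "log_fitness" are assumptions to be checked against your pm4py version):
+ import pm4py
+ def select_best(candidates, log, theta=0.95):
+     # keep candidates whose alignment-based fitness reaches theta, then maximize F1
+     best, best_f1 = None, -1.0
+     for net, im, fm in candidates:
+         fit = pm4py.fitness_alignments(log, net, im, fm)["log_fitness"]
+         if fit < theta:
+             continue
+         prec = pm4py.precision_alignments(log, net, im, fm)
+         f1 = 2 * fit * prec / (fit + prec) if fit + prec else 0.0
+         if f1 > best_f1:
+             best, best_f1 = (net, im, fm), f1
+     return best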
752
+
753
+ Discovering Sound Free-choice Workflow Nets With Non-block Structures
754
+ 13
755
+ Fall-through If none of the nets in Ci reach the fitness threshold θ, we adopt
756
+ a fall-through. This is done by going back to Step 2, where γ(i) is added to
757
+ W i = (P i, T i, F i, li, ps, pe, ⊤, ⊥), but without the constraints of V i. This can
758
+ also be seen as setting V i = P i ∪ T i. In this case, a new place p′ with arcs
759
+ {(⊤, p′), (p′, ⊥)} can always be added by ψP , as p′ is linearly dependent on ps and
+ pe. Then, the pattern-building functions can be applied to ensure that the fitness
761
+ threshold θ is guaranteed in every iteration.
762
+ 5
763
+ Evaluation
764
+ In this section, we present the experiments conducted to evaluate our approach.
765
+ The presented approach in this paper is implemented in Python using PM4Py3
766
+ and can be accessed here4. As mentioned, the algorithm takes as inputs a log
767
+ and three parameters including two thresholds θ, c, and the types of ordering
768
+ strategy. Using this implementation, we conduct three experiments to address
769
+ the following questions (1) How effective are the pre-defined patterns? (2) What
770
+ are the effects of the ordering strategy on the model quality and the execution
771
+ time? (3) Can the model quality be improved by the non-block structures?
772
+ 5.1
773
+ Experiment Setup
774
+ Dataset: We use four publicly available real-life event logs, which are BPI20175,
775
+ helpdesk6, hospitalBilling7, and traffic8 respectively. BPI2017 is split into two
776
+ sub logs, BPI2017A and BPI2017O, using the event prefixes. To focus on the
777
+ mainstream behaviors, the logs are filtered to include at least 95% of the cases.
778
+ Experiment 1 (Effectiveness of patterns): The first experiment aims to
779
+ evaluate how effective are the pre-defined patterns. As our approach is based on
780
+ [11], this can be evaluated by comparing the quality of the intermediate models
781
+ of our approach to the ones from ProDiGy [12], which adopts a similar setting.
782
+ To conduct the experiment, we follow the top recommendation of ProDiGy in
783
+ every step to get the intermediate models and compare the models’ quality with
784
+ ours. We use the projected log of every iteration to evaluate the model obtained
785
+ after adding additional activity to the model. To have a fair comparison, we
786
+ force our approach to use the same order of adding activities from ProDiGy.
787
+ Experiment 2 (Effects of Ordering Strategy & Search Space Pruning):
788
+ The order of adding activities to the log is crucial to our approach as model
789
+ quality is highly dependent on the order [11]. Moreover, the order can influence
790
+ the execution time due to its influence on the search space pruning. Therefore, we
791
+ would like to investigate the effects of the ordering strategy on the model quality
792
+ and the execution time. To set up the experiment, we apply the approach to the
793
+ 3 https://pm4py.fit.fraunhofer.de/
794
+ 4 https://github.com/tsunghao-huang/synthesisRulesMiner
795
+ 5 https://doi.org/10.4121/uuid:3926db30-f712-4394-aebc-75976070e91f
796
+ 6 https://doi.org/10.4121/uuid:0c60edf1-6f83-4e75-9367-4c63b3e9d5bb
797
+ 7 https://doi.org/10.4121/uuid:76c46b83-c930-4798-a1c9-4be94dfeb741
798
+ 8 https://doi.org/10.4121/uuid:270fd440-1057-4fb9-89a9-b699b47990f5
799
+
800
+ 14
801
+ T. Huang and W. M. P. van der Aalst
802
+ five event logs using the two different ordering strategies while keeping the other
803
+ two parameters at the same values. We evaluate the model quality in terms of
804
+ fitness, precision, and F1 score. In addition, we keep track of the ratio of the
805
+ reduced nodes, which is calculated by
806
+ |V i|
807
+ |P i∪T i|. This gives us an indication of the
808
+ effectiveness of search space pruning.
809
+ Experiment 3 (Effects of non-block structures): In this experiment, we
810
+ compare our approach to the state-of-the-art: Inductive Miner - Infrequent (IMf)
811
+ [15]. As the models discovered by IMf are guaranteed to be sound free-choice
812
+ workflow net as well, comparing our approach with IMf enables us to see if the
813
+ models can benefit from the non-block structures discovered by our approach.
814
+ For each event log, we apply IMf using five different values ([0.1, 0.2, 0.3, 0.4, 0.5])
815
+ for the filter threshold and choose the best model (by F1 score) to compare the
816
+ quality with the ones discovered by our approach in experiment 2.
817
+ For all the experiments, we use the alignment-based approaches to calculate
818
+ fitness [4] and precision [5]. We also calculate the F1 score as the harmonic mean
819
+ of the fitness and precision.
820
+ 5.2
821
+ Results
822
+ Effectiveness of Patterns Fig. 7 shows the result of the comparison.
+ [Figure 7 (plots omitted): line charts of (a) Fitness and (b) Precision versus the number of activities added (1–11), comparing our approach with ProDiGy.]
+ Fig. 7: Results on fitness and precision comparison for the effectiveness of patterns
+ The fitness and precision are the average values of the five event logs. As one can
865
+ see from the figures, both approaches can capture the behaviors quite well for
866
+ the first three activities added. When adding more activities to the model,
867
+ our approach has consistently higher values for both fitness and precision than
868
+ ProDiGy. One might think that this is expected as we extend the set of patterns
869
+ used in ProDiGy. However, note that ProDiGy evaluates every possible synthesis
870
+ rules applications while we only focus on a subset of the nodes using log heuris-
871
+ tics. There is a trade-off between optimal solution and time in our approach.
872
+ Nevertheless, the results show that the extended patterns enable us to discover
873
+ models with higher quality compared to the existing approach, ProDiGy, while
874
+ limiting the search space.
875
+
876
+ Discovering Sound Free-choice Workflow Nets With Non-block Structures
877
+ 15
878
+ Effects of Ordering Strategy and Search Space Pruning Tab. 1 shows the
879
+ results of experiments 2 and 3. We observe that the BFS-based ordering strat-
880
+ egy performs better than the frequency-based strategy (in terms of F1 score and
881
+ time) for four of the five logs. We further investigate the reason for the shorter
882
+ execution time of BFS-based ordering. As shown in Fig. 8, it turns out that the
883
+ BFS-ordering strategy is more effective (lower |Vi| / |Pi ∪ Ti|) in reducing the
+ search space at the later stage of the discovery process.
+ [Figure 8 (plot omitted): ratio of nodes reduced, |Vi| / |Pi ∪ Ti|, per number of activities added (2–11), for the frequency-based and BFS-based ordering strategies.]
+ Fig. 8: Comparison of the to-be-considered nodes ratio for each iteration between the two ordering strategies.
919
+ As the model grows, checking the pre-
920
+ conditions of an application for the
921
+ linear dependent place or transition
922
+ rule becomes more expensive. Reduc-
923
+ ing the search space more effectively
924
+ at the later stage is more beneficial in
925
+ terms of execution time in most cases.
926
+ BFS-based ordering achieves this by
927
+ considering the closeness of activities
928
+ in the process. In such case, activities
929
+ that are closer together are added first
930
+ and it is more likely for BFS-based
931
+ ordering to focus on a smaller subset
932
+ of nodes on the existing net when pruning the search space compared to the
933
+ frequency-based one.
934
+ Table 1: Results about effects of ordering strategy and comparison to IMf
+ Log              | Miner | Ordering Strategy | IMf filter | Fitness | Precision | F1    | Time (s)
+ BPI2017A         | ours  | frequency         | -          | 0.970   | 0.947     | 0.958 | 734
+ BPI2017A         | ours  | BFS               | -          | 0.989   | 0.935     | 0.961 | 342
+ BPI2017A         | IMf   | -                 | 0.2        | 0.999   | 0.936     | 0.967 | 10
+ BPI2017O         | ours  | frequency         | -          | 0.994   | 0.962     | 0.978 | 560
+ BPI2017O         | ours  | BFS               | -          | 0.989   | 1.000     | 0.994 | 240
+ BPI2017O         | IMf   | -                 | 0.2        | 0.997   | 0.907     | 0.950 | 7
+ helpdesk         | ours  | frequency         | -          | 0.972   | 0.984     | 0.977 | 54
+ helpdesk         | ours  | BFS               | -          | 0.981   | 0.976     | 0.978 | 44
+ helpdesk         | IMf   | -                 | 0.2        | 0.967   | 0.950     | 0.958 | 1
+ hospital billing | ours  | frequency         | -          | 0.961   | 0.810     | 0.879 | 567
+ hospital billing | ours  | BFS               | -          | 0.989   | 0.935     | 0.961 | 407
+ hospital billing | IMf   | -                 | 0.2        | 0.982   | 0.906     | 0.943 | 45
+ traffic          | ours  | frequency         | -          | 0.960   | 0.930     | 0.945 | 321
+ traffic          | ours  | BFS               | -          | 0.964   | 0.720     | 0.825 | 427
+ traffic          | IMf   | -                 | 0.4        | 0.904   | 0.720     | 0.801 | 28
1048
+ Effects of Non-block Structures Table 1 shows that compared to IMf, the
1049
+ models discovered by our approach have higher F1 scores for four of the five
1050
+ logs. Note that the fitness values of the models discovered by our approach are
1051
+
1052
+ 16
1053
+ T. Huang and W. M. P. van der Aalst
1054
+ all higher than the defined threshold 0.95. In general, IMf tends to discover mod-
1055
+ els with higher fitness values while our approach discovers models with higher
1056
+ precision. In IMf, one can use the filter threshold to balance fitness and precision.
1057
+ This is also the case in our approach, the user can set a lower fitness thresh-
1058
+ old to include more candidate nets that are less fitting but more precise. Fig. 9
1059
+ shows the discovered models from the two approaches for the hospitalBilling
1060
+ log. While the overall structure of Fig. 9a is similar to its counterpart in Fig. 9b,
1061
+ our approach discovered non-block structures at the later stage of the process.
1062
+ Such construct is not possible to model by IMf. The result shows that our ap-
1063
+ proach can discover sound free-choice workflow nets with non-block structures
1064
+ and produce competitive model quality as the state-of-the-art algorithm.
1065
+ (a) The discovered model using our approach. Due to the more flexible structure, one can exe-
1066
+ cute EMPTY, BILLED, or REOPEN after CODE NOK while only BILLED or REOPEN are
1067
+ executable after CODE OK. The construct is not discoverable by IMf.
1068
+ (b) The discovered model using IMf. Note that activity REOPEN is dropped by the filter of IMf.
1069
+ Fig. 9: The models discovered by our approach and IMf for the hospitalBilling log.
1070
+ 6
1071
+ Conclusion and Future Work
1072
+ In this paper, we present a discovery algorithm that aims to discover sound free-
1073
+ choice workflow nets with non-block structures. The algorithm utilizes the syn-
1074
+ thesis rules to incrementally add activities with predefined patterns to discover
1075
+ models that are guaranteed to be sound and free-choice. Moreover, a certain
1076
+ level of replay fitness is guaranteed by a user-defined threshold.
1077
+ The approach has been implemented and evaluated using various real-life
1078
+ event logs. The results show that the process models discovered by our approach
1079
+ have higher model quality (in terms of both replay fitness and precision) than
1080
+ the existing approach [12], which also depends on synthesis rules. Moreover, our
1081
+ approach produces competitive model quality compared to the state-of-the-art:
1082
+ Inductive Miner - infrequent. For future work, we plan to explore more advanced
1083
+ ordering strategies and investigate their influences on the model quality and
1084
+ computation time. The other direction is to further speed up the approach as
1085
+ the long execution time is a clear limitation. This could be done by exploiting
1086
+ the log-based heuristics further.
1087
+
1088
+ [Figure 9 label residue omitted: activity names NEW, CHANGE DIAGN, DELETE, CODE OK, CODE NOK, EMPTY, REOPEN, FIN, RELEASE, BILLED from the hospitalBilling models.]
+ Discovering Sound Free-choice Workflow Nets With Non-block Structures
1103
+ 17
1104
+ Acknowledgements. We thank the Alexander von Humboldt (AvH) Stiftung
1105
+ for supporting our research.
1106
+ References
1107
+ 1. van der Aalst, W.M.P.: The application of Petri nets to workflow management. J.
1108
+ Circuits Syst. Comput. 8(1), 21–66 (1998)
1109
+ 2. van der Aalst, W.M.P.: Process Mining - Data Science in Action, Second Edition.
1110
+ Springer (2016)
1111
+ 3. van der Aalst, W.M.P.: Using free-choice nets for process mining and business
1112
+ process management. In: FedCSIS 2021. vol. 25, pp. 9–15 (2021)
1113
+ 4. van der Aalst, W.M.P., Adriansyah, A., van Dongen, B.F.: Replaying history on
1114
+ process models for conformance checking and performance analysis. WIREs Data
1115
+ Mining Knowl. Discov. 2(2), 182–192 (2012)
1116
+ 5. Adriansyah, A., Munoz-Gama, J., Carmona, J., van Dongen, B.F., van der Aalst,
1117
+ W.M.P.: Measuring precision of modeled behavior. Inf. Syst. E Bus. Manag. 13(1),
1118
+ 37–67 (2015)
1119
+ 6. Augusto, A., Conforti, R., Dumas, M., Rosa, M.L., Bruno, G.: Automated discovery
1120
+ of structured process models from event logs:the discover-and-structure approach.
1121
+ Data Knowl. Eng. 117, 373–392 (2018)
1122
+ 7. Augusto, A., Conforti, R., Dumas, M., Rosa, M.L., Maggi, F.M., Marrella, A.,
1123
+ Mecella, M., Soo, A.: Automated discovery of process models from event logs:
1124
+ Review and benchmark. IEEE Trans. Knowl. Data Eng. 31(4), 686–705 (2019)
1125
+ 8. Augusto, A., Conforti, R., Dumas, M., Rosa, M.L., Polyvyanyy, A.: Split miner:
1126
+ automated discovery of accurate and simple business process models from event
1127
+ logs. Knowl. Inf. Syst. 59(2), 251–284 (2019)
1128
+ 9. Buijs, J.C.A.M., van Dongen, B.F., van der Aalst, W.M.P.: A genetic algorithm
1129
+ for discovering process trees. In: CEC 2012. pp. 1–8. IEEE (2012)
1130
+ 10. Desel, J., Esparza, J.: Free Choice Petri Nets. No. 40, Cambridge university press
1131
+ (1995)
1132
+ 11. Dixit, P.M.: Interactive Process Mining. Ph.D. thesis, Technische Universiteit Eind-
1133
+ hoven (2019)
1134
+ 12. Dixit, P.M., Buijs, J.C.A.M., van der Aalst, W.M.P.: Prodigy : Human-in-the-loop
1135
+ process discovery. In: RCIS 2018. pp. 1–12. IEEE (2018)
1136
+ 13. Dixit, P.M., Verbeek, H.M.W., Buijs, J.C.A.M., van der Aalst, W.M.P.: Interactive
1137
+ data-driven process model construction. In: ER 2018. vol. 11157, pp. 251–265.
1138
+ Springer (2018)
1139
+ 14. van Dongen, B.F., de Medeiros, A.K.A., Wen, L.: Process mining: Overview and
1140
+ outlook of Petri net discovery algorithms. Trans. Petri Nets Other Model. Concurr.
1141
+ 2, 225–242 (2009)
1142
+ 15. Leemans, S.J.J., Fahland, D., van der Aalst, W.M.P.: Scalable process discovery
1143
+ and conformance checking. Softw. Syst. Model. 17(2), 599–631 (2018)
1144
+ 16. Schuster, D., van Zelst, S.J., van der Aalst, W.M.P.: Incremental discovery of
1145
+ hierarchical process models. In: RCIS 2020. vol. 385, pp. 417–433. Springer (2020)
1146
+
BtE0T4oBgHgl3EQfQAAH/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
GdAyT4oBgHgl3EQfSvfs/content/tmp_files/2301.00094v1.pdf.txt ADDED
@@ -0,0 +1,1049 @@
 
1
+ arXiv:2301.00094v1 [math.CV] 31 Dec 2022
2
+ ON SKODA’S THEOREM FOR NADEL-LEBESGUE MULTIPLIER IDEAL
3
+ SHEAVES ON SINGULAR COMPLEX SPACES AND REGULARITY OF
4
+ WEAK KÄHLER-EINSTEIN METRICS
5
+ ZHENQIAN LI
6
+ Abstract. In this article, we will characterize regular points respectively by the local van-
7
+ ishing, positivity of the Ricci curvature and L2-solvability of the ∂-equation together with
8
+ Skoda’s theorem for Nadel-Lebesgue multiplier ideal sheaves associated to plurisubhar-
9
+ monic (psh) functions on any (reduced) complex space of pure dimension. As a by-product,
10
+ we show that any weak Kähler-Einstein metric on singular Q-Fano/Calabi-Yau/general
11
+ type varieties cannot be smooth, and that in general there exists no singular normal Kähler
12
+ complex space such that the Kähler metric is Kähler-Einstein on the regular locus.
13
+ 1. Introduction
14
+ Throughout this note, all complex spaces are always assumed to be reduced and para-
15
+ compact unless otherwise mentioned; we mainly refer to [19, 40] for basic references on
16
+ the theory of complex spaces.
17
+ 1.1. Local vanishing for multiplier ideals. The local vanishing theorem for the higher
18
+ direct images of sheaves computing multiplier ideals plays an important role in complex
19
+ geometry and algebraic geometry, by which many local/global properties of multiplier
20
+ ideal could be deduced, e.g., the restriction theorem, Nadel vanishing theorem and Skoda’s
21
+ theorem for multiplier ideals and so on (cf. [10, 28], etc.).
22
+ Let (X, ω) be a Hermitian complex space of pure dimension n and ϕ ∈ QPsh(X) be a
23
+ quasi-psh function on X. Then we can define the Nadel-Lebesgue multiplier ideal sheaf
24
+ INL(ϕ) associated to ϕ on X by the integrability with respect to the Lebesgue measure dVω
25
+ (see Definition 2.2), which coincides with the usual multiplier ideal sheaf I (ϕ) introduced
26
+ by Nadel whenever X is smooth. Let π : X̃ → X be any log resolution of the Jacobian ideal
27
+ JacX of X, then it follows that the Nadel-Lebesgue multiplier ideal sheaf
28
+ INL(ϕ) = π∗( OX̃( K̂X̃/X ) ⊗ I(ϕ ◦ π) ).
33
+ When X is smooth, the Mather discrepancy divisor K̂X̃/X is nothing but the relative canon-
+ ical divisor KX̃/X := KX̃ − π∗KX of X̃ over X. Then, we have the following local vanishing
35
+ for multiplier ideals (cf. [28, 37])
36
+ Rqπ∗( OX̃( KX̃/X ) ⊗ I(ϕ ◦ π) ) = 0, ∀q ≥ 1.
41
+ Therefore, it is natural to ask whether we could establish a similar local vanishing result
42
+ in the singular setting, i.e.,
+ $R^q\pi_*\big(\mathcal{O}_{\widetilde X}(\widehat{K}_{\widetilde X/X}) \otimes \mathcal{I}(\varphi \circ \pi)\big) = 0, \quad \forall\, q \ge 1.$
48
+ In the present note, one of our goals is to study the local vanishing in the context of Nadel-
49
+ Lebesgue multiplier ideals. In particular, based on Skoda’s division for Nadel-Lebesgue
50
+ Date: January 3, 2023.
51
+ 2010 Mathematics Subject Classification. 14F18, 32L20, 32Q20, 32S05, 32U05, 32W05.
52
+ Key words. Multiplier ideal sheaves, plurisubharmonic functions, vanishing theorems, ∂-equations, Skoda’s
53
+ L2 division theorem, Kähler-Einstein metrics.
54
+ E-mail: [email protected].
59
+ multiplier ideals, we will prove that such a local vanishing for Nadel-Lebesgue multiplier
60
+ ideals is in fact equivalent to smoothness of the ambient space in some sense; see Theorem
61
+ 1.3 for a detailed statement.
62
+ 1.2. Skoda’s ideal generation by L2 estimates for the ∂-equation. In the classical works
63
+ [44, 45], relying on the L2 methods due to [1, 25, 26] in several complex variables, Skoda
64
+ established an analytic criterion on the ideal generation by a given collection of holo-
65
+ morphic functions or sections. In the original proof of Skoda’s ideal generation, as well as
66
+ standard techniques in functional analysis for the argument on a priori estimate and solving
67
+ ∂-equation with L2 estimates, he also developed special analytic techniques by restricting
68
+ the domain of the ∂-operator to an appropriate subspace of the usual L2 space and inducing
69
+ an L2 estimate on this new operator.
70
+ As applications, Skoda’s theorem is a crucial ingredient in proving the Briançon-Skoda
71
+ theorem in commutative algebra [6, 27, 35] and an effective version of the Nullstellensatz
72
+ in algebraic geometry [13]. Moreover, a special case of Skoda’s ideal generation also
73
+ played key roles in Siu’s works on the deformation invariance of plurigenera [42] and finite
74
+ generation of the canonical ring [43]. The interaction between several complex variables,
75
+ complex algebraic geometry and partial differential equations has been an attractive area
76
+ for the researchers. For the sake of reader’s convenience, we state a version of Skoda’s L2
77
+ division theorem as below.
78
+ Theorem 1.1. ([45], Théorème 2). Let (X, ω) be an n-dimensional weakly pseudoconvex
79
+ Kähler manifold with ϕ ∈ Psh(X), and g : E → Q be a surjective morphism of Hermitian
80
+ holomorphic vector bundles with rE = rank E and rQ = rank Q. Suppose that E is Nakano
81
+ semi-positive on X and L → X is a Hermitian line bundle such that
+ $\sqrt{-1}\,\Theta(L) - \rho\sqrt{-1}\,\Theta(\det Q) \ge 0$
86
+ for ρ = min{n, rE − rQ} + ε and some ε > 0.
87
+ Then, for every f ∈ H0(X, Q ⊗ KX ⊗ L) satisfying
+ $\int_X \langle \widetilde{gg^*}\, f, f \rangle \cdot (\det gg^*)^{-\rho-1} e^{-2\varphi}\, dV_\omega < +\infty,$
+ there exists h ∈ H0(X, E ⊗ KX ⊗ L) such that f = g · h and
+ $\int_X |h|^2 \cdot (\det gg^*)^{-\rho} e^{-2\varphi}\, dV_\omega \le \frac{\rho}{\varepsilon} \int_X \langle \widetilde{gg^*}\, f, f \rangle \cdot (\det gg^*)^{-\rho-1} e^{-2\varphi}\, dV_\omega.$
101
+ Due to Theorem 1.1, if we consider the trivial bundles E, Q and L on a pseudoconvex
102
+ domain, then by combining with the strong openness of multiplier ideal sheaves established
103
+ by Guan-Zhou [21], we can reformulate Theorem 1.1 in the language of multiplier ideals
104
+ as follows (cf. also Remark A.3):
105
+ Theorem 1.2. Let X be an n-dimensional complex manifold with ϕ ∈ QPsh(X) a quasi-psh
106
+ function and a ⊂ OX a nonzero ideal sheaf with r (local) generators. Then, it follows that
+ $\mathcal{I}(\varphi + k\varphi_{\mathfrak a}) = \mathfrak{a} \cdot \mathcal{I}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big), \quad \forall\, k \ge \min\{n, r\},$
+ where $\varphi_{\mathfrak a} := \frac{1}{2}\log\big(\sum_i |g_i|^2\big)$ and (gi) is any local system of generators of a.
113
+ Motivated by the above reformulation of Theorem 1.1, it is interesting for us to explore
114
+ an analogue to Theorem 1.2 for Nadel-Lebesgue multiplier ideals in the singular setting.
115
+ In order to achieve such a goal, a natural idea is to generalize Skoda’s L2 methods to the
116
+ singular case, i.e., creating an appropriate L2 theory for the ∂-operator on singular complex
117
+ spaces. However, as presented in [15, 16], it seems not to be possible to establish a general
118
+ theory as in the smooth setting to solve the ∂-equation with L2 estimates on complex spaces
119
+ with singularities; one can refer to [17, 38, 39, 41] for some partial results on the related
120
+ topics.
124
+ On the other hand, we can also consider to apply Theorem 1.1 near the singularities
125
+ under some reasonable assumptions on the positivity of curvatures. Fortunately, we could
126
+ show that positivity of the Ricci curvature on the regular locus is in fact equivalent to the
127
+ desired Skoda’s ideal generation and L2-solvability of the ∂-equation. More precisely, we
128
+ state our main result in the following:
129
+ Theorem 1.3. Let X be a (Hermitian) complex space of pure dimension n with x ∈ X a
+ normal point and $\pi : \widetilde{X} \to X$ a log resolution of the Jacobian ideal JacX of X. Then, the
+ following statements are equivalent:
+ (1) For each quasi-psh function ϕ near the point x ∈ X, we have
+ $R^q\pi_*\big(\mathcal{O}_{\widetilde X}(\widehat{K}_{\widetilde X/X}) \otimes \mathcal{I}(\varphi \circ \pi)\big) = 0, \quad \forall\, q \ge 1.$
+ (2) For each quasi-psh function ϕ near the point x ∈ X, we have
+ $R^q\pi_*\big(\mathcal{O}_{\widetilde X}(\widehat{K}_{\widetilde X/X}) \otimes \mathcal{I}(\varphi \circ \pi)\big) = 0, \quad \forall\, 1 \le q < n.$
+ (3) For some Stein neighborhood Ω ⊂⊂ X of the point x, there exists a Kähler metric
+ ω on Ω such that the Ricci curvature Ric(ω) ≥ 0 on the regular locus Ωreg of Ω.
+ (4) For some Stein neighborhood Ω ⊂⊂ X of the point x, there exists a Kähler metric
+ ω and a C∞ differentiable real function ψ on Ω such that $\mathrm{Ric}(\omega) + \sqrt{-1}\,\partial\bar\partial\psi \ge 0$
+ on the regular locus Ωreg of Ω.
+ (5) For some Stein neighborhood Ω ⊂⊂ X of the point x, there exists a Kähler metric
+ ω and a Hermitian line bundle L on Ω such that for any smooth ϕ ∈ SPsh(Ω) and
+ $v \in L^2_{0,q}(\Omega_{\mathrm{reg}}, L)$ satisfying $\bar\partial v = 0$ and
+ $\int_{\Omega_{\mathrm{reg}}} \langle A_\varphi^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega < +\infty$
+ with the curvature operator $A_\varphi = [\sqrt{-1}\,\partial\bar\partial\varphi, \Lambda_\omega]$ on Ωreg, we have $u \in L^2_{0,q-1}(\Omega_{\mathrm{reg}}, L)$
+ such that $\bar\partial u = v$ and
+ $\int_{\Omega_{\mathrm{reg}}} |u|^2 e^{-2\varphi}\, dV_\omega \le \int_{\Omega_{\mathrm{reg}}} \langle A_\varphi^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega.$
+ (6) For some Stein neighborhood Ω ⊂⊂ X of the point x, there exists a Kähler metric
+ ω and a Hermitian line bundle L on Ω such that for any smooth ϕ ∈ SPsh(Ω) and
+ $v \in L^2_{0,1}(\Omega_{\mathrm{reg}}, L)$ satisfying $\bar\partial v = 0$ and
+ $\int_{\Omega_{\mathrm{reg}}} \langle A_\varphi^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega < +\infty,$
+ we have $u \in L^2(\Omega_{\mathrm{reg}}, L)$ such that $\bar\partial u = v$ and
+ $\int_{\Omega_{\mathrm{reg}}} |u|^2 e^{-2\varphi}\, dV_\omega \le \int_{\Omega_{\mathrm{reg}}} \langle A_\varphi^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega.$
+ (7) Skoda's theorem holds for Nadel-Lebesgue multiplier ideals, i.e., for any
+ nonzero ideal sheaf a with r generators and quasi-psh function ϕ near the point
+ x ∈ X, it holds that
+ $\mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a}) = \mathfrak{a} \cdot \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big), \quad \forall\, k \ge \min\{n, r\}.$
+ (8) For any nonzero ideal sheaf a near the point x ∈ X, it holds that
+ $\mathcal{I}_{\mathrm{NL}}(n\varphi_{\mathfrak a}) = \mathfrak{a} \cdot \mathcal{I}_{\mathrm{NL}}\big((n-1)\varphi_{\mathfrak a}\big).$
+ (9) For any nonzero ideal sheaf a near the point x ∈ X, it holds that
+ $\mathcal{I}_{\mathrm{NL}}(n\varphi_{\mathfrak a}) \subset \mathfrak{a}.$
+ (10) The point x ∈ X is a regular point of X.
209
+ In the above result, the most interesting point is that it presents several
+ characterizations of regular points by statements involving Nadel-Lebesgue mul-
+ tiplier ideals which look almost unrelated to one another; e.g., (1), (2) in algebraic geometry, (3), (4)
+ in differential geometry and (5), (6) in partial differential equations, together with (7)–(9) in
+ commutative algebra. The core idea of all the arguments originates from Skoda's ideal
+ generation via the L2 approaches in several complex variables.
215
+ Remark 1.4. Simple examples show that the assumption that x is a normal point of X
216
+ cannot be removed in Theorem 1.3; in particular, none of the statements (1), (2), (7), (8)
+ implies (10) in that case.
218
+ As a straightforward consequence of Theorem 1.3, we have
219
+ Corollary 1.5. Any normal Kähler space with nonnegative Ricci curvature on the regular
220
+ locus must be non-singular.
221
+ 1.3. Kähler-Einstein metrics on singular varieties. Let X be a normal Q-Gorenstein
222
+ Kähler space, that is, a normal Kähler space whose canonical class KX defines a Q-line
223
+ bundle on X. A Kähler current ω ∈ c1(±KX) is called a weak (or singular) Kähler-Einstein
224
+ metric on X if ω has bounded local potentials and is a genuine Kähler-Einstein metric on
225
+ the regular locus Xreg of X (cf. [3, 4, 5, 14], etc.). A weak Kähler-Einstein metric ω on
226
+ X is called a Kähler-Einstein metric if ω is a Kähler metric on X, i.e., ω has smooth local
227
+ potentials. For general expositions on the topic of Kähler-Einstein metrics one can refer to
228
+ [2, 23, 47, 48, 49, 50] and the references therein. In particular, we state some recent results
229
+ as follows.
230
+ Theorem 1.6. ([3, 5, 14, 30, 31, 32, 36], etc.). Let X be a normal Q-Gorenstein complex
231
+ projective variety. Then:
232
+ (1) If X is a Q-Calabi-Yau variety with only log terminal singularities, then X admits
233
+ a weak Kähler-Einstein metric.
234
+ (2) If KX is ample, then X admits a weak Kähler-Einstein metric if and only if X is
235
+ K-stable.
236
+ (3) If −KX is ample, then X admits a weak Kähler-Einstein metric if and only if X is
237
+ K-polystable.
238
+ A basic and widely open problem in Kähler geometry/geometric analysis is understand-
239
+ ing the geometric asymptotic behavior of the weak Kähler-Einstein metric near the singular
240
+ locus Xsing of X. In [24], the authors made a breakthrough with a very precise descrip-
241
+ tion for a class of Calabi-Yau varieties with smoothable isolated singularities, which are
242
+ in further required to be isomorphic to a neighborhood of the vertex in a strongly regular
243
+ Calabi-Yau cone; see also [7, 8, 18] for some recent progress in this direction. In more gen-
244
+ eral situations, by using deep tools in the theory of degenerate complex Monge-Ampère
245
+ equations on singular complex spaces, the continuity of local potentials of weak Kähler-
246
+ Einstein metrics is established for all Q-Fano/Calabi-Yau varieties in [4, 22], but so far
247
+ little is known about higher-order regularity in general, and it is desirable to establish such
+ regularity for weak Kähler-Einstein potentials. However, relying on Theorem 1.3, we will see that
249
+ too much regularity cannot be expected and in fact any weak Kähler-Einstein potential is at
250
+ most C α (α < 2) differentiable near the singularities. In particular, we obtain the following
251
+ Theorem 1.7. Let X be a normal Q-Gorenstein Kähler space admitting a weak Kähler-
252
+ Einstein metric ω. Then, ω is smooth on X if and only if X is non-singular.
253
+ 2. Preliminaries
254
+ Firstly, we introduce the notion of Nadel-Lebesgue multiplier ideal sheaf on any com-
255
+ plex space of pure dimension and then present some useful facts used throughout this note.
+ Definition 2.1. Let X be a complex space of pure dimension and $\varphi \in L^1_{\mathrm{loc}}(X_{\mathrm{reg}})$ with
261
+ respect to the Lebesgue measure. Then, the complex space X is said to be a Hermitian
262
+ complex space if there is a Hermitian metric ω on the regular part (may be disconnected)
263
+ Xreg of X such that ω is locally the restriction of a Hermitian metric on some CN for a local
264
+ embedding of X. It follows from the differentiable partition of unity that every complex
265
+ space is a Hermitian complex space as in the smooth case.
266
+ The complex space X is said to be a Kähler space if there is a Hermitian metric ω on X
267
+ such that ω is locally the restriction of a Kähler metric on some CN for a local embedding
268
+ of X. In particular, it admits smooth strictly psh functions as local potentials.
269
+ We say that the function ϕ is quasi-plurisubharmonic (quasi-psh for short) on X if it is
270
+ locally equal to the sum of a psh function and of a smooth function on X. The set of quasi-
271
+ psh (resp. psh and strictly psh) functions on X is denoted by QPsh(X) (resp. Psh(X) and
272
+ SPsh(X)). A quasi-psh function ϕ ∈ QPsh(X) will be said to have analytic singularities on
273
+ X if ϕ can be written locally as
+ $\varphi = \frac{c}{2}\log\big(|f_1|^2 + \cdots + |f_{N_0}|^2\big) + O(1),$
276
+ where c ∈ R≥0 and (fi) are holomorphic functions.
277
+ Definition 2.2. Let (X, ω) be a Hermitian complex space of pure dimension and ϕ ∈
+ $L^1_{\mathrm{loc}}(X_{\mathrm{reg}})$ with respect to the Lebesgue measure.
280
+ The Nadel-Lebesgue multiplier ideal sheaf associated to ϕ on X is defined to be the
281
+ OX-submodule INL(ϕ) ⊂ MX of germs of meromorphic functions f ∈ MX,x such that
282
+ |f|2e−2ϕ is integrable with respect to the Lebesgue measure dVω near the point x ∈ X. One
283
+ can check that INL(ϕ) is independent of the choice of Hermitian metric ω on X.
284
+ The log canonical threshold (or complex singularity exponent) LCTx(ϕ) of ϕ at a point
285
+ x ∈ X is defined to be
+ $\mathrm{LCT}_x(\varphi) := \sup\big\{\, c \ge 0 \ \big|\ \mathcal{O}_{X,x} \subset \mathcal{I}_{\mathrm{NL}}(c\varphi)_x \,\big\}.$
288
+ It is convenient to put LCTx(−∞) = 0.
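+ For instance (a standard computation, recorded here only for illustration): for $\varphi = \log|z|$ on $X = \mathbb{C}^n$
+ one has $\mathrm{LCT}_0(\varphi) = n$, since $e^{-2c\varphi} = |z|^{-2c}$ is integrable near the origin if and only if $c < n$.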
289
+ It is easy to see that INL(ϕ) ⊂ OX is an ideal sheaf when X is a normal complex space
290
+ and ϕ is locally bounded from above on X. In addition, if X is smooth and ϕ ∈ QPsh(X),
291
+ then INL(ϕ) is nothing but the usual multiplier ideal sheaf I (ϕ) introduced by Nadel (see
292
+ [10]).
293
+ Remark 2.3. Since the definition of Nadel-Lebesgue multiplier ideals is local, we can com-
294
+ pute the multiplier ideals by choosing a special Hermitian metric ω for a local embedding
295
+ of X. In particular, if X is an n-dimensional complex subspace of some domain in CN,
296
+ we can take Hermitian metric ω on X to be the inherited standard Kähler metric from CN.
+ Then, we have $dV_\omega = \frac{1}{n!}\upsilon^n|_{X_{\mathrm{reg}}}$, where $\upsilon = \frac{\sqrt{-1}}{2}\sum_{k=1}^{N} dz_k \wedge d\bar z_k$.
305
+ For the sake of reader’s convenience, we state a basic estimate related to local volume
306
+ of an analytic subset as follows.
307
+ Lemma 2.4. ([20], Lemma 2.3). Let X be a pure n-dimensional analytic subset through
308
+ the origin 0 of some domain in CN (N ≥ 2). Then, there is a Stein neighborhood U ⊂⊂ CN
309
+ of the origin 0 such that for any 0 ≤ ε < 1, we have
+ $\int_{U \cap X} \frac{1}{(|z_1|^2 + \cdots + |z_N|^2)^{n-1+\varepsilon}}\, dV_\omega < +\infty,$
+ where $dV_\omega = \frac{1}{n!}\upsilon^n|_{X_{\mathrm{reg}}}$ and $\upsilon = \frac{\sqrt{-1}}{2}\sum_{k=1}^{N} dz_k \wedge d\bar z_k$.
322
+ Analogous to the Nadel-Ohsawa multiplier ideal sheaves introduced in [33, 34] (see also
323
+ [9, 12] for the algebro-geometric counterpart), we state some related properties as follows.
327
+ Proposition 2.5. (1) Let π : �X → X be a log resolution of the Jacobian ideal JacX of X
328
+ and �K�X/X be the Mather discrepancy divisor. Then, we have the image
+ $\mathrm{Im}\big(\pi^*\Omega^n_X \hookrightarrow \Omega^n_{\widetilde X}\big) = \mathcal{O}_{\widetilde X}(-\widehat{K}_{\widetilde X/X}) \cdot \Omega^n_{\widetilde X},$
+ and
+ $\mathcal{I}_{\mathrm{NL}}(\varphi) = \pi_*\big(\mathcal{O}_{\widetilde X}(\widehat{K}_{\widetilde X/X}) \otimes \mathcal{I}(\varphi \circ \pi)\big).$
343
+ Furthermore, we can deduce that INL(ϕ + log |JacX|) = INO(ϕ), the Nadel-Ohsawa
344
+ multiplier ideal sheaf associated to ϕ on X.
345
+ (2) When X is normal and ϕ has analytic singularities, INL(ϕ) coincides with the
346
+ Mather multiplier ideal sheaf defined in [9].
347
+ (3) For any ϕ ∈ QPsh(X), it follows that INL(ϕ) ⊂ MX is a coherent fractional ideal
+ sheaf and satisfies the strong openness, i.e., $\mathcal{I}_{\mathrm{NL}}(\varphi) = \bigcup_{\varepsilon>0} \mathcal{I}_{\mathrm{NL}}\big((1+\varepsilon)\varphi\big)$.
352
+ For our proof of Theorem 1.3, we need the following L2 estimates for the ∂-equation
353
+ and relative version of Grauert-Riemenschneider vanishing theorem for the higher direct
354
+ images.
355
+ Theorem 2.6. (cf. [10], Theorem 5.2). Let (X, ω) be an n-dimensional Kähler manifold,
356
+ which contains a weakly pseudoconvex Zariski open subset. Let L be a Hermitian line
357
+ bundle on X such that
+ $\sqrt{-1}\,\Theta(L) + \mathrm{Ric}(\omega) > 0.$
+ Then, for every smooth ϕ ∈ Psh(X) and $v \in L^2_{0,q}(X, L)$ satisfying $\bar\partial v = 0$ and
+ $\int_X \langle A^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega < +\infty$
+ with the curvature operator $A = [\sqrt{-1}\,\Theta(L) + \mathrm{Ric}(\omega) + \sqrt{-1}\,\partial\bar\partial\varphi, \Lambda_\omega]$ on X, there exists
+ $u \in L^2_{0,q-1}(X, L)$ such that $\bar\partial u = v$ and
+ $\int_X |u|^2 e^{-2\varphi}\, dV_\omega \le \int_X \langle A^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega.$
378
+ Theorem 2.7. ([11], Theorem 1.1). Let (X, ω) be an n-dimensional Kähler manifold which
379
+ is a Zariski open subset of some Stein space X∗, and L be a Hermitian line bundle on X.
+ If for any smooth ϕ ∈ SPsh(X∗) and $v \in L^2_{0,1}(X, L)$ satisfying $\bar\partial v = 0$ and
+ $\int_X \langle A_\varphi^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega < +\infty$
+ with the curvature operator $A_\varphi = [\sqrt{-1}\,\partial\bar\partial\varphi, \Lambda_\omega]$ on X, there exists $u \in L^2(X, L)$ such that
+ $\bar\partial u = v$ and
+ $\int_X |u|^2 e^{-2\varphi}\, dV_\omega \le \int_X \langle A_\varphi^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega,$
+ then it follows that $L \otimes K_X^{-1}$ is Nakano semi-positive on X.
399
+ Theorem 2.8. ([37], Corollary 1.5). Let π : X → Y be a surjective proper (locally) Kähler
400
+ morphism from a complex manifold X to a complex space Y, and (L, e−ϕL) be a (possibly
401
+ singular) Hermitian line bundle on X with semi-positive curvature. Then, the higher direct
+ image sheaf
+ $R^q\pi_*\big(K_X \otimes L \otimes \mathcal{I}(\varphi_L)\big) = 0$
408
+ for every q > dim X − dim Y.
409
+ Remark 2.9. Any log resolution π : �X → X of a coherent ideal sheaf I on a complex space
410
+ X is a locally Kähler (proper modification), which is locally a finite sequence of blow-ups
411
+ with smooth centers. Besides, any finite holomorphic mapping between complex spaces is
412
+ (locally) proper Kähler.
416
+ In the remainder of this section, we recall some algebraic properties on the integral
417
+ closure of ideals.
418
+ Definition 2.10. ([46]). Let R be a commutative ring and I an ideal of R. An element
419
+ f ∈ R is said to be integrally dependent on I if it satisfies a relation
+ $f^d + a_1 f^{d-1} + \cdots + a_d = 0 \qquad (a_k \in I^k,\ 1 \le k \le d).$
422
+ The set I consisting of all elements in R which are integrally dependent on I is called
423
+ the integral closure of I in R. I is called integrally closed if I = I. One can prove that I is
424
+ an ideal of R, which is the smallest integrally closed ideal in R containing I.
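+ A simple example for illustration: in $R = \mathcal{O}_{\mathbb{C}^2,0}$ with $I = (z_1^2, z_2^2)$, the element $f = z_1 z_2$
+ satisfies $f^2 - z_1^2 z_2^2 = 0$ with $-z_1^2 z_2^2 \in I^2$, so that $z_1 z_2 \in \overline{I}$ although $z_1 z_2 \notin I$.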
425
+ Definition 2.11. ([46]). Let R be a commutative ring with identity and let J ⊂ I be ideals in
426
+ R. J is said to be a reduction of I if there exists a nonnegative integer n such that In+1 = JIn.
427
+ A reduction J of I is called minimal if no ideal strictly contained in J is a reduction of
428
+ I. An ideal that has no reduction other than itself is called basic.
429
+ One can prove that minimal reductions do exist in Noetherian local rings and an ideal
430
+ which is a minimal reduction of a given ideal is necessarily basic. Moreover, if R is a
431
+ Noetherian ring, J ⊂ I is a reduction of I if and only if J = I.
432
+ In the analytic setting, we have the following characterization on integral closure and
433
+ reduction of ideals.
434
+ Theorem 2.12. (cf. [29], Théorème 2.1). Let X be a complex space and Y ⊂ X be a
435
+ proper closed complex subspace (may be non-reduced) defined by a coherent OX-ideal I
436
+ with x ∈ Y a point. Let J ⊂ OX be a coherent OX-ideal and I (resp. J) be the germ of
437
+ I (resp. J ) at x. Then, the following conditions are equivalent:
438
+ (1) J ⊂ I.
439
+ (2) For every morphism π : �X → X satisfying: (i) π is proper and surjective, (ii) �X is
+ a normal complex space and (iii) I · O�X is an invertible O�X-module, there exists
441
+ an open neighborhood U of x in X such that
442
+ J · O�X|π−1(U) ⊂ I · O�X|π−1(U).
443
+ (3) If V is an open neighborhood of x on which I and J are generated by their
444
+ global sections, then for every system of generators g1, ..., gr ∈ Γ(V, I ) and every
445
+ f ∈ Γ(V, J ), one can find an open neighborhood V′ of x and a constant C > 0
446
+ such that
+ $|f(y)| \le C \cdot \sup_k |g_k(y)|, \quad \forall\, y \in V'.$
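+ In the example following Definition 2.10, condition (3) is transparent: for $g_1 = z_1^2$, $g_2 = z_2^2$
+ one has $|z_1 z_2| \le \max\{|z_1|^2, |z_2|^2\} = \sup_k |g_k|$, in accordance with $z_1 z_2 \in \overline{(z_1^2, z_2^2)}$.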
450
+ Remark 2.13. Let X be a normal complex space and I ⊂ OX a coherent ideal sheaf. Let
451
+ π : �X → X be any proper modification from a normal complex space �X onto X such that
452
+ I · O�X = O�X(−D) for some effective Cartier divisor D on �X. Then, we have π∗O�X(−D) =
453
+ I , the integral closure of I in OX.
454
+ Lemma 2.14. (cf. Example 9.6.19 in [28]; see also [10], Lemma 11.16). Let X be a
455
+ normal complex space of dimension n and a ⊂ OX a nonzero ideal. Then, there exists
456
+ an open covering {Uα}α∈N of X such that a|Uα has a reduction bα generated by at most n
457
+ elements.
458
+ 3. Proofs of the main results
459
+ 3.1. Proof of Theorem 1.3. Since all of the statements are local, without loss of gener-
460
+ ality, we may assume that X is an n-dimensional (n ≥ 2) normal (Hermitian) complex subspace
461
+ of some domain in CN with ϕ ∈ QPsh(X) and a = (g1, . . ., gr) · OX an ideal sheaf gen-
462
+ erated by holomorphic functions g1, . . . , gr on X. Moreover, we may also assume that ϕ
463
+ is (locally) a strictly psh function on X if necessary, by adding some smooth strictly psh
467
+ function. It is easy to see that the implications (1) =⇒ (2), (3) =⇒ (4), (5) =⇒ (6) and
468
+ (7) =⇒ (8) =⇒ (9) are trivial; in particular, we will present a proof in the following order:
+ [Diagram of implications: (1) ⇒ (2) ⇒ (7) ⇒ (8) ⇒ (9) ⇒ (10) ⇒ (1), and (10) ⇒ (3) ⇒ (5) ⇒ (6) ⇒ (4) ⇒ (7), with (3) ⇒ (4).]
+ “(2) =⇒ (7)”. By the definition of Nadel-Lebesgue multiplier ideal sheaf, it follows
541
+ that
+ $\mathfrak{a} \cdot \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big) \subset \mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a}),$
547
+ and so it is sufficient to show the reverse inclusion.
548
+ Case (i). When r ≤ n.
549
+ Let π : �X → X be a common log resolution of JacX and a such that a · O�X = O�X(−F)
550
+ for some effective divisors F on �X. Denote by
+ $A_m := \mathcal{O}_{\widetilde X}(\widehat{K}_{\widetilde X/X}) \otimes \mathcal{I}(\varphi \circ \pi + m\varphi_{\mathfrak a} \circ \pi) = \mathcal{O}_{\widetilde X}(\widehat{K}_{\widetilde X/X}) \otimes \mathcal{I}(\varphi \circ \pi) \otimes \mathcal{O}_{\widetilde X}(-mF)$
553
+ for any m ∈ N, and consider the Koszul complex determined by g1, . . ., gr:
+ $0 \to \Lambda^r V \otimes \mathcal{O}_{\widetilde X}(rF) \to \cdots \to \Lambda^2 V \otimes \mathcal{O}_{\widetilde X}(2F) \to V \otimes \mathcal{O}_{\widetilde X}(F) \to \mathcal{O}_{\widetilde X} \to 0,$
555
+ where V is the vector space spanned by g1, . . . , gr. Note that the Koszul complex is locally
556
+ split and its syzygies are locally free, so twisting through by any coherent sheaf will pre-
557
+ serve the exactness. Then, by twisting with Ak (k ≥ r), we obtain the following long exact
558
+ sequence
559
+ 0 → ΛrV ⊗ Ak−r → · · · → Λ2V ⊗ Ak−2 → V ⊗ Ak−1 → Ak → 0.
560
+ (⋆)
561
+ On the other hand, for any m ∈ N, by (2) we have the local vanishing of the higher direct
562
+ images Rqπ∗Am = 0 (1 ≤ q < n). Note that
+ $\mathcal{I}_{\mathrm{NL}}(\varphi + m\varphi_{\mathfrak a}) = \pi_* A_m$
566
+ by the functoriality property with respect to direct images of sheaves by modifications, and
567
+ then by taking direct images of (⋆) we will deduce the following so-called exact Skoda
568
+ complex (cf. [28], p. 228):
+ $0 \to \Lambda^r V \otimes \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-r)\varphi_{\mathfrak a}\big) \to \cdots \to V \otimes \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big) \to \mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a}) \to 0.$
+ In particular, the map $V \otimes \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big) \to \mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a})$ is surjective, by which we
+ can infer that $\mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a}) \subset \mathfrak{a} \cdot \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big)$ for any k ≥ r.
586
+ Case (ii). When r > n.
587
+ As the statement is local, then by Lemma 2.14 we may assume that b is a reduction of
588
+ a generated by n elements �g1, ...,�gn. Consider a common log resolution π : �X → X of
589
+ JacX, a and b such that a · O�X = b · O�X = O�X(−F) for some effective divisors F on �X.
590
+ Then, by the same argument as above, we can deduce the following exact Skoda complex:
+ $0 \to \Lambda^n V \otimes \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-n)\varphi_{\mathfrak a}\big) \to \cdots \to V \otimes \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big) \to \mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a}) \to 0$
601
+ for any k ≥ n, where V is the vector space spanned by �g1, ...,�gn. Therefore, it follows that
+ $\mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a}) \subset \mathfrak{b} \cdot \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big) \subset \mathfrak{a} \cdot \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big).$
609
+ “(3) =⇒ (5)”. It follows from the assumption that we have a Stein neighborhood Ω ⊂⊂
610
+ X of the point x with a Kähler metric ω such that Ric(ω) ≥ 0 on Ωreg. Let ϕ ∈ SPsh(Ω) be
611
+ any smooth strictly psh function on Ω and L = Ω × C be a trivial bundle equipped with the
612
+ trivial Hermitian metric, which implies that
+ $\sqrt{-1}\,\Theta(L) + \mathrm{Ric}(\omega) + \sqrt{-1}\,\partial\bar\partial\varphi \ge \sqrt{-1}\,\partial\bar\partial\varphi > 0$
+ on Ωreg.
620
+ Since Ω is a Stein space, we are able to choose a complex hypersurface Z ⊂ Ω which
621
+ contains the singular locus Ωsing of Ω such that Ω − Z ⊂ Ωreg is a Stein manifold. Then, by
+ Theorem 2.6 we obtain that, for any smooth ϕ ∈ SPsh(Ω) and $v \in L^2_{0,q}(\Omega_{\mathrm{reg}}, L)$ satisfying
+ $\bar\partial v = 0$ and $\int_{\Omega_{\mathrm{reg}}} \langle A_\varphi^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega < +\infty$,
+ we can find $u \in L^2_{0,q-1}(\Omega_{\mathrm{reg}}, L)$ such that $\bar\partial u = v$ and
+ $\int_{\Omega_{\mathrm{reg}}} |u|^2 e^{-2\varphi}\, dV_\omega \le \int_{\Omega_{\mathrm{reg}}} \langle A_\varphi^{-1} v, v \rangle\, e^{-2\varphi}\, dV_\omega.$
638
+ “(6) =⇒ (4)”. As a straightforward application of Theorem 2.7 on Ωreg, it yields that
+ $\sqrt{-1}\,\Theta(L) + \mathrm{Ric}(\omega) \ge 0$ on Ωreg. Let Ω′ ⊂ Ω be a small Stein neighborhood of the point
+ x such that the Hermitian line bundle L has a smooth potential ψ on Ω′. Therefore, we
+ deduce that
+ $\sqrt{-1}\,\partial\bar\partial\psi + \mathrm{Ric}(\omega) = \sqrt{-1}\,\Theta(L) + \mathrm{Ric}(\omega) \ge 0$
+ on Ω′reg.
649
+ “(4) =⇒ (7)”. Due to the definition and Lemma 2.14, it is sufficient to prove
+ $\mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a}) \subset \mathfrak{a} \cdot \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big)$
+ for the case r ≤ n near the point x ∈ X. Let $f \in \mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a})_x$ with k ≥ min{n, r} = r,
+ then by the strong openness of multiplier ideals there exists small enough ε > 0 such that
+ $f \in \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k+\varepsilon)\varphi_{\mathfrak a}\big)_x$.
664
+ By the assumption of (4), we let Ω ⊂⊂ X be a Stein neighborhood of the point x with
+ a Kähler metric ω and a smooth real function ψ on Ω such that $\mathrm{Ric}(\omega) + \sqrt{-1}\,\partial\bar\partial\psi \ge 0$ on
+ Ωreg. After shrinking Ω if necessary, we may assume that the function ψ is bounded on Ω
+ and f is holomorphic on Ω such that
+ $\int_\Omega |f|^2 \cdot |g|^{-2(r+\varepsilon)} e^{-2(\varphi + (k-r)\varphi_{\mathfrak a})}\, dV_\omega < +\infty.$
673
+ In addition, we also choose a complex hypersurface Z ⊂ Ω which contains the singular
674
+ locus Ωsing of Ω and the common zero-set of holomorphic functions g1, ..., gr such that
675
+ Ω′ := Ω − Z is a Stein manifold.
+ Let E = Ω′ × Cr and Q = Ω′ × C be the trivial bundles on Ω′ and $L = K_{\Omega'}^{-1}$ be the
678
+ anti-canonical line bundle with the induced metric twisted by a weight e−ψ. The morphism
679
+ g : E → Q determined by holomorphic functions g1, ..., gr is given by
+ $(h_1, \ldots, h_r) \mapsto \sum_{m=1}^{r} g_m \cdot h_m = g \cdot h.$
+ Note that $\widetilde{gg^*} = \mathrm{Id}_Q$ when rank Q = 1, and on Ω′ we have
+ $\sqrt{-1}\,\Theta(L) - (r-1+\varepsilon)\sqrt{-1}\,\Theta(\det Q) = \mathrm{Ric}(\omega) + \sqrt{-1}\,\partial\bar\partial\psi \ge 0.$
695
+ Thus, we can apply Theorem 1.1 on Ω′ and then obtain an r-tuple (h1, ..., hr) of holomor-
696
+ phic functions on Ω′ such that f = g · h on Ω′ and
+ $\int_{\Omega'} |h|^2 \cdot |g|^{-2(r-1+\varepsilon)} e^{-2(\varphi + (k-r)\varphi_{\mathfrak a})}\, dV_\omega = \int_{\Omega'} |h|^2 e^{-2(\varphi + (k-1+\varepsilon)\varphi_{\mathfrak a})}\, dV_\omega < +\infty.$
701
+ We can now extend every hm to be a holomorphic function on Ω from the L2 estimate above
702
+ and normality of X, which implies that
+ $\mathcal{I}_{\mathrm{NL}}(\varphi + k\varphi_{\mathfrak a}) \subset \mathfrak{a} \cdot \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1+\varepsilon)\varphi_{\mathfrak a}\big) \subset \mathfrak{a} \cdot \mathcal{I}_{\mathrm{NL}}\big(\varphi + (k-1)\varphi_{\mathfrak a}\big)$
710
+ on Ω; we finish the argument.
711
+ “(9) =⇒ (10)”. By the assumption, we have $\mathcal{I}_{\mathrm{NL}}(n\varphi_{\mathfrak a}) \subset \mathfrak{a}$. Suppose that x ∈ X
+ is a singular point. Then, by the local parametrization for analytic sets, we can find a
+ local coordinate system (z′; z′′) = (z1, ..., zn; zn+1, ..., zN) near x such that for some constant
+ C > 0, we have |z′′| ≤ C · |z′| for any point z ∈ X near x.
+ Let a ⊂ OX be the ideal sheaf generated by holomorphic functions $\tilde z_1, \ldots, \tilde z_n \in \mathcal{O}_X$
+ (shrinking X if necessary), where $\tilde z_k$ are the residue classes of zk in OX. From the non-
+ smoothness of X at the point x, we deduce that the embedding dimension $\dim_{\mathbb C}(\mathfrak{m}_{X,x}/\mathfrak{m}^2_{X,x}) \ge n+1$
+ of X at x, which implies that there exists k0 (n + 1 ≤ k0 ≤ N) such that $\tilde z_{k_0} \notin \mathfrak{a}$.
+ On the other hand, after shrinking X again, it follows that
+ $\int_X \frac{|z_{k_0}|^2}{|z'|^{2n}}\, dV_\omega \le C^2 (1 + C^2)^{n-1} \cdot \int_X |z|^{-2(n-1)}\, dV_\omega < +\infty,$
+ where the finiteness of the integration follows from Lemma 2.4. Then, we infer that $\tilde z_{k_0} \in \mathcal{I}_{\mathrm{NL}}(n\varphi_{\mathfrak a})$,
+ but $\tilde z_{k_0} \notin \mathfrak{a}$, which contradicts the assumption $\mathcal{I}_{\mathrm{NL}}(n\varphi_{\mathfrak a}) \subset \mathfrak{a}$. Thus, we obtain
+ that x ∈ X is a regular point.
737
+ “(10) =⇒ (1)”. It is a straightforward consequence of Theorem 2.8.
738
+ “(10) =⇒ (3)”. Since x is a regular point of X, after choosing an appropriate coordinate
739
+ neighborhood of x, we may assume that Ω ∋ x is a Stein domain in Cn. Therefore, we can
+ take $\omega = \frac{\sqrt{-1}}{2}\sum_{k=1}^{n} dz_k \wedge d\bar z_k$ to be the standard Euclidean metric on Cn and then we have
747
+ Ric(ω) = 0 on Ω; the proof of Theorem 1.3 is concluded.
748
+
749
+ Remark 3.1. In addition, we can deduce from the proof of Theorem 1.3 that
750
+ (i) if (1) or (2) holds for each quasi-psh function ϕ with analytic singularities, then x ∈ X
751
+ is a regular point;
752
+ (ii) both of the statements (3) and (4) could be respectively modified to be Ric(ω) ≥ 0
+ and $\mathrm{Ric}(\omega) + \sqrt{-1}\,\partial\bar\partial\psi \ge 0$ on a Zariski open subset of Ω contained in Ωreg.
756
+ 3.2. Proof of Theorem 1.7. It is sufficient to prove the necessity.
757
+ Let x ∈ X be any point. Since ω is a smooth Kähler metric on X, then ω has smooth
+ local potentials, i.e., there exists a Stein neighborhood Ω ⊂ X of x and a smooth strictly psh
+ function ψ on Ω such that $\omega = \sqrt{-1}\,\partial\bar\partial\psi$ on Ωreg, which implies that $\mathrm{Ric}(\omega) + \sqrt{-1}\,\partial\bar\partial\psi \ge 0$
+ on Ωreg whenever Ric(ω) = ±ω or 0. Thus, it follows from (4) in Theorem 1.3 that x ∈ X is
765
+ a regular point.
766
+
767
+ Remark 3.2. The same arguments as in the proofs of Theorems 1.3 and 1.7 also imply that
768
+ each local potential of weak Kähler-Einstein metric ω is C 2 differentiable on X if and only
769
+ if X is non-singular, and that there exists no singular normal Kähler space such that the
770
+ Kähler metric is Kähler-Einstein on the regular locus.
771
+ In fact, our method is still available when the weak Kähler-Einstein metric is (locally)
772
+ equivalent to the standard induced Kähler metric by restriction near the singularities; for
773
+ instance, when the weak Kähler-Einstein metric is of locally bounded coefficients.
777
+ Appendix A. Uniform bounds of powers associated to an L2 division problem
778
+ The ideal membership is an important object to study in commutative algebra, algebraic
779
+ geometry and several complex variables, e.g., the famous Hilbert’s Nullstellensatz and
780
+ Briançon-Skoda theorem and so on. In this part, we are mainly interested in the uniform
781
+ bounds of powers associated to an L2 division problem, a kind of special ideal membership.
782
+ Let X be a Stein manifold of dimension n and a = (g1, . . . , gr)·OX an ideal sheaf generated
783
+ by holomorphic functions g1, . . . , gr on X. In general, the division problem states that,
784
+ given a positive integer k ∈ N and holomorphic function f on X, we wish to determine
785
+ when f is generated by holomorphic functions g1, . . ., gr; more precisely, when we can
786
+ find holomorphic functions h1, . . . , hr ∈ ak−1 on X such that
+ $f = \sum_{m=1}^{r} g_m \cdot h_m.$
791
+ Thanks to the Oka-Cartan theory on Stein manifolds, the division problem is solvable if
792
+ and only if f ∈ ak.
793
+ Note that the condition f ∈ ak is purely algebraic, and so it is natural to ask whether we
794
+ could find an analytic condition to replace the algebraic one. It is easy to see that f ∈ ak
+ implies that $|f| e^{-\varphi_k}$ is locally bounded on X, or $L^2_{\mathrm{loc}}$ more generally, where $\varphi_k := k \log|g|$
797
+ and |g|2 := |g1|2 + · · · + |gr|2. On the other hand, local boundedness of |f|e−ϕk is equivalent
798
+ to the fact that f ∈ ak, the integral closure of ak in OX (see Theorem 2.12). Thus, it is
799
+ an interesting question whether we could establish solvability of an L2 analogue of the
800
+ division problem.
801
+ Let ϕ ∈ Psh(X) be a psh function on X and denote by
+ $A^2_{\mathrm{loc}}(X, \varphi) := \big\{\, f \in \mathcal{O}_X(X) \ \big|\ |f|^2 e^{-2\varphi} \text{ is locally integrable on } X \,\big\}.$
809
+ Then, we raise the following L2 division problem:
810
+ Question A.1. Let X be an n-dimensional Stein manifold with a psh function ϕ ∈ Psh(X),
811
+ and a = (g1, . . . , gr) · OX an ideal sheaf generated by holomorphic functions g1, . . ., gr on
+ X. Given a positive integer k ∈ N and $f \in A^2_{\mathrm{loc}}(X, \varphi + \varphi_k)$, are there holomorphic functions
+ $h_1, \ldots, h_r \in A^2_{\mathrm{loc}}(X, \varphi + \varphi_{k-1})$ such that
+ $f = \sum_{m=1}^{r} g_m \cdot h_m?$
820
+ A.1. A solution to Question A.1. Unfortunately, the answer to Question A.1 is negative
+ for general k (see Example A.2). Motivated by Skoda's L2 division theorem (cf. The-
+ orem 1.1), it seems reasonable to look for a uniform integer k0, depending only on n,
+ such that Question A.1 is solvable for any k ≥ k0. The goal of this part is to present an
+ optimal uniform lower bound on the powers associated to Question A.1. In particular, we will
825
+ establish the following
826
+ Theorem A.1. There exists a uniform integer k0 = min{n, r} such that the solution to
827
+ Question A.1 is positive for any k ≥ k0. Furthermore, the uniform lower bound k0 = min{n, r}
828
+ is optimal.
829
+ In fact, the optimality of uniform integer k0 = min{n, r} is straightforward by the fol-
830
+ lowing:
831
+ Example A.2. Let Bn(0) be the unit ball centered at the origin 0 = (0′, 0′′) in Cr×Cn−r (1 ≤
+ r ≤ n) and take g1 = z1, ..., gr = zr, f ≡ 1, ϕ ≡ 0 on Bn(0). Then, for every k < k0 = r, the
+ answer to Question A.1 is negative.
+ Indeed, by the fact that the log canonical threshold $\mathrm{LCT}_{(0',z'')}(\varphi_k) = \frac{r}{k} > 1$ of ϕk at any
+ point (0′, z′′), one can derive that $f \in A^2_{\mathrm{loc}}(B_n(0), \varphi + \varphi_k)$. Then, we infer from the fact that
841
+ f has no zeros in Bn(0) that there exist no holomorphic functions h1, . . . , hr on Bn(0) such
+ that $f = \sum_{m=1}^{r} g_m \cdot h_m$.
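+ (To spell out the integrability claim: with z′ the first r coordinates, $e^{-2\varphi_k} = |z'|^{-2k}$ and, in polar
+ coordinates on Cr, $\int_{|z'|<1} |z'|^{-2k}\, d\lambda \propto \int_0^1 \rho^{2r-1-2k}\, d\rho < +\infty$ precisely when k < r, so $f \equiv 1$ lies in
+ $A^2_{\mathrm{loc}}(B_n(0), \varphi + \varphi_k)$ for every k < r.)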
846
+ Proof of Theorem A.1. It follows from the local vanishing (Theorem 2.8) and the argu-
847
+ ments as in the proof of Theorem 1.3 that for any k ≥ min{n, r}, we have
+ $\mathcal{I}(\varphi + \varphi_k) = \mathfrak{a} \cdot \mathcal{I}(\varphi + \varphi_{k-1}).$
+ Let
+ $\tau : \mathcal{I}(\varphi + \varphi_{k-1})^{\oplus r} \longrightarrow \mathcal{I}(\varphi + \varphi_k)$
+ be the sheaf homomorphism defined by
+ $\tau(h_{1,x}, \ldots, h_{r,x}) = \sum_{m=1}^{r} g_m \cdot h_{m,x}$
+ for any germs $h_{m,x} \in \mathcal{I}(\varphi + \varphi_{k-1})_x$. Then, we have an exact sequence of sheaves
+ $\mathcal{I}(\varphi + \varphi_{k-1})^{\oplus r} \xrightarrow{\ \tau\ } \mathcal{I}(\varphi + \varphi_k) \longrightarrow 0.$
+ It follows from the Oka-Cartan theory on Stein manifolds that the induced sequence of
+ sections
+ $\Gamma\big(X, \mathcal{I}(\varphi + \varphi_{k-1})^{\oplus r}\big) \xrightarrow{\ \tau_*\ } \Gamma\big(X, \mathcal{I}(\varphi + \varphi_k)\big) \longrightarrow 0$
+ is also exact, which implies that any section $f \in \Gamma\big(X, \mathcal{I}(\varphi + \varphi_k)\big)$ can be written as the
+ image $f = \sum_{m=1}^{r} g_m \cdot h_m$ for some sections $h_m \in \Gamma\big(X, \mathcal{I}(\varphi + \varphi_{k-1})\big)$.
893
+
894
+ Remark A.3. (An alternative argument on Theorem A.1). In fact, we could also give an-
895
+ other argument on the proof of Theorem A.1 depending on the strong openness of multi-
896
+ plier ideals established by Guan-Zhou [21] and the Skoda’s L2 division theorem for holo-
897
+ morphic functions (see Theorem 1.1).
898
+ Since the statement is local, it follows from Lemma 2.14 that it is sufficient to prove
+ $\mathcal{I}(\varphi + \varphi_k) \subset \mathfrak{a} \cdot \mathcal{I}(\varphi + \varphi_{k-1})$ for the case r ≤ n. Given $f \in \Gamma\big(X, \mathcal{I}(\varphi + \varphi_k)\big)$, after shrinking
+ X, we may assume that X is the unit ball in Cn and
+ $\int_X |f|^2 e^{-2(\varphi + \varphi_k)}\, d\lambda_n = \int_X |f|^2 \cdot |g|^{-2k} e^{-2\varphi}\, d\lambda_n < +\infty.$
+ Then, for each k ≥ r, by the strong openness of multiplier ideals there exists sufficiently
+ small ε > 0 such that
+ $\int_X |f|^2 e^{-2(\varphi + (1+\varepsilon)\varphi_k)}\, d\lambda_n = \int_X |f|^2 \cdot |g|^{-2(1+\varepsilon)k} e^{-2\varphi}\, d\lambda_n < +\infty,$
921
+ shrinking X if necessary. Finally, combining with Theorem 1.1, we deduce the desired
922
+ result.
923
+ A.2. A global L2 version of Question A.1. Let (X, ω) be an n-dimensional Stein manifold
924
+ with a Kähler form ω. Let ϕ ∈ Psh(X) and I = (g1, . . . , gr) · OX an ideal sheaf generated
925
+ by holomorphic functions g1, . . . , gr on X. Denote by
+ $A^2(X, \varphi) := \big\{\, f \in \mathcal{O}_X(X) \ \big|\ \int_X |f|^2 e^{-2\varphi}\, dV_\omega < +\infty \,\big\}.$
935
+ Then, we have the following global analogue of Question A.1:
936
+ Question A.2. Can we find a uniform integer k0 such that for each k ≥ k0 and f ∈ A2(X, ϕ+
937
+ ϕk), there exist h1, . . ., hr ∈ A2(X, ϕ + ϕk−1) satisfying
+ $f = \sum_{m=1}^{r} g_m \cdot h_m?$
942
+ As an immediate consequence of Theorem 1.1, we obtain the following
946
+ Theorem A.4. Let X be a pseudoconvex domain in Cn. Then, there exists a uniform integer
947
+ k0 = min{n + 2, r + 1} such that the solution to Question A.2 is positive.
948
+ Remark A.5. (1) More generally, Theorem A.4 also holds for any complete Kähler domain
949
+ in Cn with smooth psh function ϕ ∈ Psh(X).
950
+ (2) In this case, combining with Example A.2, it follows that the optimal uniform
951
+ lower bound k0 is at least min{n, r}, and at most min{n + 2, r + 1}.
952
+ References
953
+ [1] A. Andreotti, E. Vesentini, Carleman estimates for the Laplace-Beltrami equation in complex manifolds,
954
+ Publ. Math. Inst. Hautes Études Sci. 25 (1965), 81–130.
955
+ [2] T. Aubin, Équations du type Monge-Ampère sur les variétés kählériennes compactes, Bull. Sci. Math. (2)
956
+ 102 (1978), 63–95.
957
+ [3] R. J. Berman, K-polystability of Q-Fano varieties admitting Kähler-Einstein metrics, Invent. Math. 203
958
+ (2016), 973–1025.
959
+ [4] R. J. Berman, S. Boucksom, P. Eyssidieux, et al., Kähler-Einstein metrics and the Kähler-Ricci flow on log
960
+ Fano varieties, J. Reine Angew. Math. 751 (2019), 27–89.
961
+ [5] R. J. Berman, H. Guenancia, Kähler-Einstein metrics on stable varieties and log canonical pairs, Geom.
962
+ Funct. Anal. 24 (2014), 1683–1730.
963
+ [6] J. Briançon, H. Skoda, Sur la clôture intégrale d’un idéal de germes de fonctions holomorphes en un point
964
+ de Cn, C. R. Acad. Sc. Paris, Sér. A 278 (1974), 949–951.
965
+ [7] S.-K. Chiu, G. Székelyhidi, Higher regularity for singular Kähler-Einstein metrics, preprint, arXiv:
966
+ 2202.11083.
967
+ [8] V. Datar, X. Fu, J. Song, Kähler-Einstein metrics near an isolated log-canonical singularity, preprint,
968
+ arXiv: 2106.05486.
969
+ [9] T. de Fernex, R. Docampo, Jacobian discrepancies and rational singularities, J. Eur. Math. Soc. 16 (2014),
970
+ 165–199.
971
+ [10] J.-P. Demailly, Analytic Methods in Algebraic Geometry, Higher Education Press, Beijing, 2010.
972
+ [11] F. S. Deng, J. F. Ning, Z. W. Wang, X. Y. Zhou, Positivity of holomorphic vector bundles in terms of
973
+ Lp-estimates for ∂, to appear in Math. Ann., https://doi.org/10.1007/s00208-021-02348-7.
974
+ [12] L. Ein, S. Ishii, M. Musta¸t˘a, Multiplier ideals via Mather discrepancy, in Minimal Models and Extremal
975
+ Rays (Kyoto, 2011), Adv. Stud. Pure Math., Vol. 70, Math. Soc. Japan, Tokyo, 2016, pp. 9–28.
976
+ [13] L. Ein, R. Lazarsfeld, A geometric effective Nullstellensatz, Invent. Math. 137 (1999), 427–448.
977
+ [14] P. Eyssidieux, V. Guedj, A. Zeriahi, Singular Kähler-Einstein metrics, J. Amer. Math. Soc. 22 (2009),
978
+ 607–639.
979
+ [15] J. E. Fornæss, L2 results for ∂ in a conic, in Complex Analysis and Related Topics (Cuernavaca, 1996),
980
+ Oper. Theory Adv. Appl., Vol. 114, Birkhäuser, Basel, 2000, pp. 67–72.
981
+ [16] J. E. Fornæss, E. A. Gavosto, The Cauchy Riemann equations on complex spaces, Duke Math. J. 93 (1998),
982
+ 453–477.
983
+ [17] J. E. Fornæss, N. Øvrelid, S. Vassiliadou, Local L2 results for ∂: the isolated singularities case, Internat.
984
+ J. Math. 16 (2005), 387–418.
985
+ [18] X. Fu, H.-J. Hein, X. Jiang, Asymptotics of Kähler-Einstein metrics on complex hyperbolic cusps, preprint,
986
+ arXiv: 2108.13390.
987
+ [19] H. Grauert, R. Remmert, Coherent Analytic Sheaves, Grundlehren Math. Wiss., Vol. 265, Springer-Verlag,
988
+ Berlin, 1984.
989
+ [20] Q. A. Guan, Z. Q. Li, A characterization of regular points by Ohsawa-Takegoshi extension theorem, J.
990
+ Math. Soc. Japan 70 (2018), 403–408.
991
+ [21] Q. A. Guan, X. Y. Zhou, A proof of Demailly’s strong openness conjecture, Ann. of Math. (2) 182 (2015),
992
+ 605–616. See also arXiv: 1311.3781.
993
+ [22] V. Guedj, H. Guenancia, A. Zeriahi, Continuity of singular Kähler-Einstein potentials, to appear in Int.
994
+ Math. Res. Not. IMRN, https://doi.org/10.1093/imrn/rnab294.
995
+ [23] V. Guedj, A. Zeriahi, Degenerate Complex Monge-Ampère Equations, EMS Tracts in Mathematics, Vol.
996
+ 26, European Mathematical Society (EMS), Zürich, 2017.
997
+ [24] H.-J. Hein, S. Sun, Calabi-Yau manifolds with isolated conical singularities, Publ. Math. Inst. Hautes
998
+ Études Sci. 126 (2017), 73–130.
999
+ [25] L. Hörmander, L2 estimates and existence theorems for the ∂ operator, Acta Math. 113 (1965), 89–152.
1000
+ [26] L. Hörmander, An Introduction to Complex Analysis in Several Variables, 3rd edition, North-Holland
1001
+ Mathematical Library, Vol. 7, North-Holland Publishing Co., Amsterdam, 1990.
1002
+ [27] C. Huneke, Uniform bounds in Noetherian rings, Invent. Math. 107 (1992), 203–223.
1003
+ [28] R. Lazarsfeld, Positivity in Algebraic Geometry II, Ergeb. Math. Grenzgeb. (3), Vol. 49, Springer-Verlag,
1004
+ Berlin, 2004.
1008
+ [29] M. Lejeune-Jalabert, B. Teissier, Clôture intégrale des idéaux et équisingularité, Ann. Fac. Sci. Toulouse
1009
+ Math. 17 (2008), 781–859.
1010
+ [30] C. Li, G-uniform stability and Kähler-Einstein metrics on Fano varieties, Invent. Math. 227 (2022), 661–
1011
+ 744.
1012
+ [31] C. Li, G. Tian, F. Wang, On the Yau-Tian-Donaldson conjecture for singular Fano varieties, Comm. Pure
1013
+ Appl. Math. 74 (2021), 1748–1800.
1014
+ [32] C. Li, G. Tian, F. Wang, The uniform version of Yau-Tian-Donaldson conjecture for singular Fano vari-
1015
+ eties, Peking Math. J. 5 (2022), 383–426.
1016
+ [33] Z. Q. Li, Nadel-Ohsawa multiplier ideal sheaves on complex spaces with singularities, submitted. See also
1017
+ arXiv: 2003.11717.
1018
+ [34] Z. Q. Li, Analytic adjoint ideal sheaves associated to plurisubharmonic functions II, Ann. Sc. Norm. Super.
1019
+ Pisa Cl. Sci. (5) XXII (2021), 183–193.
1020
+ [35] Z. Q. Li, On the Briançon-Skoda theorem for analytic local rings with singularities, J. Algebra 577 (2021),
1021
+ 45–60.
1022
+ [36] Y. Liu, C. Xu, Z. Zhuang, Finite generation for valuations computing stability thresholds and applications
1023
+ to K-stability, Ann. of Math. (2) 196 (2022), 507–566.
1024
+ [37] S. Matsumura, Injectivity theorems with multiplier ideal sheaves for higher direct images under Kähler
1025
+ morphisms, Algebr. Geom. 9 (2022), 122–158.
1026
+ [38] N. Øvrelid, S. Vassiliadou, L2-∂-cohomology groups of some singular complex spaces, Invent. Math. 192
1027
+ (2013), 413–458.
1028
+ [39] W. L. Pardon, M. A. Stern, L2-∂-cohomology of complex projective varieties, J. Amer. Math. Soc. 4 (1991),
1029
+ 603–621.
1030
+ [40] R. Richberg, Stetige streng pseudokonvexe Funktionen, Math. Ann. 175 (1968), 257–286.
1031
+ [41] J. Ruppenthal, L2-theory for the ∂-operator on compact complex spaces, Duke Math. J. 163 (2014), 2887–
1032
+ 2934.
1033
+ [42] Y.-T. Siu, Invariance of plurigenera, Invent. Math. 134 (1998), 661–673.
1034
+ [43] Y.-T. Siu, Multiplier ideal sheaves in complex and algebraic geometry, Sci. China Ser. A 48 (2005), 1–31.
1035
+ [44] H. Skoda, Application des techniques L2 à la théorie des idéaux d’une algèbre de fonctions holomorphes
1036
+ avec poids, Ann. Sci. École Norm. Sup. (4) 5 (1972), 545–579.
1037
+ [45] H. Skoda, Morphismes surjectifs de fibrés vectoriels semi-positifs, Ann. Sci. École Norm. Sup. (4) 11
1038
+ (1978), 577–611.
1039
+ [46] I. Swanson, C. Huneke, Integral Closure of Ideals, Rings, and Modules, London Math. Soc. Lecture Note
1040
+ Series, Vol. 336, Cambridge University Press, Cambridge, 2006.
1041
+ [47] G. Székelyhidi, An Introduction to Extremal Kähler Metrics, Graduate Studies in Math., Vol. 152, Amer.
1042
+ Math. Soc., Providence, RI, 2014.
1043
+ [48] C. Xu, K-stability of Fano varieties: an algebro-geometric approach, EMS Surv. Math. Sci. 8 (2021),
1044
+ 265–354.
1045
+ [49] C. Xu, K-stability: the recent interaction between algebraic geometry and complex geometry, Notices
1046
+ Amer. Math. Soc. 69 (2022), 1126–1136.
1047
+ [50] S.-T. Yau, On the Ricci curvature of a compact Kähler manifold and the complex Monge-Ampère equation
1048
+ I, Comm. Pure Appl. Math. 31 (1978), 339–411.
1049
+
GdAyT4oBgHgl3EQfSvfs/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
GdE1T4oBgHgl3EQfrAWz/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6efb0986cb24d39b6c7aefa151b5b456e245413a0a40733e465176eacffeceb
3
+ size 1572909
JdAyT4oBgHgl3EQffviy/content/2301.00347v1.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc8dc0add02493ac61744d7f2b5206c4533b71f8ef732409d455847a99eeb6e4
3
+ size 1819072
OtFKT4oBgHgl3EQffy6V/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7472b93ac2291f60bbcbaf0554f9616cdfaf039b41232570995558bfe6d06456
3
+ size 70524
OtFOT4oBgHgl3EQf3zQR/content/tmp_files/2301.12947v1.pdf.txt ADDED
@@ -0,0 +1,1348 @@
1
+ Fighting the sign problem in a chiral random matrix model with contour deformations
2
+ Matteo Giordano,1 Attila Pásztor,1 Dávid Pesznyák,1 and Zoltán Tulipánt1
3
+ 1ELTE Eötvös Loránd University, Institute for Theoretical Physics,
4
+ Pázmány Péter sétány 1/A, H-1117, Budapest, Hungary
5
+ We studied integration contour deformations in the chiral random matrix theory of Stephanov [1]
6
+ with the goal of alleviating the finite-density sign problem. We considered simple ansätze for the
7
+ deformed integration contours, and optimized their parameters.
8
+ We find that optimization of a
9
+ single parameter manages to considerably improve on the severity of the sign problem. We show
10
+ numerical evidence that the improvement achieved is exponential in the degrees of freedom of the
11
+ system, i.e., the size of the random matrix. We also compare the optimization method with contour
12
+ deformations coming from the holomorphic flow equations.
+ I. INTRODUCTION
15
+ Euclidean quantum field theories at non-zero particle
16
+ density (or chemical potential) generally suffer from a
17
+ complex action problem: the weights in the path integral
18
+ representation are complex, and thus cannot be inter-
19
+ preted as a joint probability density function on the space
20
+ of field configurations (up to a proportionality factor).
21
+ This prevents the use of importance sampling methods
22
+ for the direct simulation of these theories. In QCD, this
23
+ complex action problem severely hampers first-principles
24
+ studies of dense matter in the core of neutron stars, neu-
25
+ tron star mergers, core collapse supernovae, as well as in
26
+ heavy ion collisions at certain collision energies.
27
+ In the presence of a complex action problem one can
28
+ still (in principle) simulate a modified theory with real
29
+ and positive weights, and then use reweighting methods
30
+ to calculate observables in the theory of interest. If the
31
+ target theory has field variables φ, path integral weights
+ wt(φ), and partition function $Z_t = \int \mathcal{D}\phi\, w_t(\phi)$, and the
+ simulated theory has the same field variables, but dif-
+ ferent – real and positive – path integral weights ws(φ)
+ and partition function $Z_s = \int \mathcal{D}\phi\, w_s(\phi)$, we can obtain
+ expectation values in the target theory via the formula
+ $\langle O \rangle_t = \frac{\big\langle \frac{w_t}{w_s} O \big\rangle_s}{\big\langle \frac{w_t}{w_s} \big\rangle_s}, \qquad \langle O \rangle_x = \frac{1}{Z_x} \int \mathcal{D}\phi\, w_x(\phi)\, O(\phi), \qquad (1)$
57
+ where x may stand for t or s and O(φ) is some phys-
58
+ ical observable of interest. The denominator in Eq. (1)
59
+ gives the ratio of the partition functions in the target and
60
+ simulated theories, i.e.,
+ $\big\langle \tfrac{w_t}{w_s} \big\rangle_s = \frac{Z_t}{Z_s}. \qquad (2)$
69
+ This ratio is typically exponentially small in the physical
70
+ volume, with the exponent given by the free energy dif-
71
+ ference between the target and simulated theories. This
72
+ ratio is also a rough measure of the numerical difficulty
73
+ of a given reweighting scheme, with a given simulated
74
+ and target theory. In order for reweighting to be effec-
75
+ tive, one wants the target and simulated theories to be as
76
+ close to each other as possible. Ideally, one should find a
77
+ simulated theory with Zs ≈ Zt.
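+ As a minimal numerical illustration of Eqs. (1) and (2) (a sketch added for the reader, not part
+ of the original analysis; w_t, w_s and obs are hypothetical arrays holding the target weights,
+ the simulated weights and the observable on configurations sampled from w_s):
+ import numpy as np
+ def reweighted_expectation(w_t, w_s, obs):
+     # configuration-by-configuration reweighting factors (complex in general)
+     factors = w_t / w_s
+     # Eq. (1): <O>_t = <(w_t/w_s) O>_s / <w_t/w_s>_s
+     return np.mean(factors * obs) / np.mean(factors)
+ The denominator np.mean(factors) estimates the ratio Zt/Zs of Eq. (2), so its smallness is a
+ direct diagnostic of the severity of the sign problem.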
78
+ Two simple choices of a simulated theory are the phase-
79
+ quenched (PQ) theory, with simulated weights propor-
80
+ tional to
+ $w_s^{\mathrm{PQ}} \equiv |w_t(\phi)|\,, \qquad (3)$
85
+ or – assuming that the partition function Zt is real –
86
+ the sign-quenched (SQ) theory, with simulated weights
87
+ proportional to
+ $w_s^{\mathrm{SQ}} \equiv |\mathrm{Re}\, w_t(\phi)|\,. \qquad (4)$
92
+ For the first case (phase reweighting) the reweighting
+ factors $w_t/w_s^{\mathrm{PQ}} \equiv e^{i\theta}$ are pure phases. For the sec-
+ ond case (sign reweighting) the reweighting factors are
+ $w_t/w_s^{\mathrm{SQ}} = e^{i\theta}/|\cos\theta|$. For certain observables, such as
+ manifestly real observables or observables with a con-
+ jugation ($\phi \to \bar\phi$) symmetry, one can substitute $w_t/w_s^{\mathrm{PQ}}$
+ with $\cos\theta$ and $w_t/w_s^{\mathrm{SQ}}$ with a pure sign $\cos\theta/|\cos\theta|$. For
107
+ phase or sign reweighting, we can then say that the com-
108
+ plex action problem becomes a sign problem: the cancel-
+ lations between contributions with different signs of cos θ
+ lead to a small $Z_t/Z_s$ ratio, and in turn to small signal-to-
112
+ noise ratios in the expectation values of observables.
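+ Concretely, for manifestly real observables the two quenched schemes reduce to the following
+ reweighting factors (again an illustrative sketch; w_t is a hypothetical array of complex target
+ weights):
+ import numpy as np
+ def quenched_factors(w_t):
+     theta = np.angle(w_t)
+     # phase reweighting: simulate with |w_t|, reweight with cos(theta)
+     # sign reweighting: simulate with |Re w_t|, reweight with the sign of cos(theta)
+     return np.cos(theta), np.sign(np.cos(theta))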
113
+ The sign-quenched ensemble always has a less severe
+ sign problem, due to the inequality $Z_t < Z_s^{\mathrm{SQ}} < Z_s^{\mathrm{PQ}}$,
+ which is a consequence of cos θ ≤ | cos θ| ≤ 1. However, in
+ the limit of a severe sign problem – i.e., as the distribution
+ of the argument θ tends to a uniform distribution on
+ [−π, π) – the severity of the sign problem for these two
+ reweighting schemes only differs by a constant factor [2],
+ given by $\big(Z_s^{\mathrm{PQ}}/Z_s^{\mathrm{SQ}}\big)^2 \to (\pi/2)^2$.
131
+ In QCD and in other (more or less) QCD-like models,
132
+ describing the interactions of several “flavors” of fermions,
133
+ the path integral weights can be written schematically as
134
+ wt(φ) = det M1(φ, µ1) . . . det MNf (φ, µNf )e−SB(φ), (5)
135
+ where the fields φ are real bosonic variables and SB
136
+ is the corresponding bosonic part of the action, Nf is
137
+ the number of fermion flavors in the model, det Mk is
138
+ the fermionic determinant of the kth flavor and µk is
139
+ the corresponding chemical potential, for k = 1, . . . , Nf.
143
The source of the sign problem is the fermionic determinant, which at non-zero µ is generally a complex number. Moreover, an important feature of the sign problem in QCD and QCD-like theories is that it tends to get much worse in the ranges of µ where zeros of the determinant in the complex µ plane become dense [3].

Nonetheless, reweighting from the phase- and sign-quenched theories is starting to become feasible even in full QCD [2, 4], which has recently led to the calculation of the equation of state of a hot-and-dense quark-gluon plasma in the region of chemical potentials covered by the RHIC Beam Energy Scan [5]. However, the range of practical applicability of such an approach is limited both in volume and chemical potential by the smallness of the ratio $Z_t/Z_s$. Lacking a solution of the sign problem, it is then desirable to develop methods that at least alleviate it, to extend the range of parameters that reweighting methods can practically reach.

One possible route to do this is the use of contour deformations in the path integral (see Ref. [6] for a recent review). If the path integral weights $w_t(\phi)$ are holomorphic functions of the field variables,¹ the multivariate Cauchy theorem guarantees that complexified integration manifolds in the same homology class as the original one yield the same partition function. However, the phase- and sign-quenched integrands are not holomorphic, and therefore the phase- and sign-quenched partition functions are not invariant under such deformations. It may then be possible to bring the ratios $Z_t/Z_s$ closer to unity, thus making reweighting more effective.

There are different ways to deform integration contours. Historically, methods based on Lefschetz thimbles appeared first [6, 9–14]. Lefschetz thimbles are the disjoint components of the integration contour defined by requiring that the imaginary part of the classical action is constant in each component. The thimble structure of theories with a fermionic determinant is usually quite complicated [15–19]. Simple toy models reveal the following features: i) cancellations between competing thimbles are very important for getting the correct results, and ii) the thimbles themselves are not smooth at the zeros of the fermionic determinant. Thus, the use of thimbles might be impractical for such theories. However, Lefschetz thimbles are, in general, not the numerically optimal integration contours [20], i.e., they are not necessarily the contours with the largest $Z_t/Z_s$, so there is no need to concentrate solely on them.

¹ A notable exception is lattice QCD with rooted staggered fermions [7, 8].
192
A second class of methods is based on numerical optimization. The main idea here is to parametrize the integration manifold by a finite number of parameters, which are then optimized to make the sign problem as mild as possible. Such methods were applied to a one-dimensional integral [21], the 0+1D scalar theory [22], the 0+1D Polyakov-improved Nambu-Jona-Lasinio model [23], 0+1D QCD [24], 1+1D scalar field theory [25], the 1+1D Thirring model [26], the 2+1D Thirring model [27], Bose gases of several dimensions [28], 1+1D U(1) gauge theory with a complex coupling constant [29] and the 2+1D XY model at finite density [30].
208
Here, we apply contour optimization methods to a fermionic toy model that shares relevant technical features with finite chemical potential QCD: the chiral random matrix model proposed by Stephanov in Ref. [1]. Since it is an exactly solvable model with a sign problem, the Stephanov model is a very useful testbed for methods aimed at solving or alleviating the sign problem. This model has been studied with the complex Langevin approach [31–33], which fails for this particular model [34] even with the introduction of gauge cooling [33]. There are also preliminary results for this model with the tempered Lefschetz thimble method [35], which is based on parallel tempering [36] in the flow time of the holomorphic flow [11, 37]. This method – similarly to other flow-based methods – produces a weaker sign problem, albeit at the cost of substantially increasing the per-configuration cost of generating the ensemble compared to ordinary phase reweighting.
227
In this paper we study the Stephanov model with optimization methods. There are, roughly speaking, two approaches to such an optimization: one can look for the optimum using either a very general ansatz with a large number of parameters, or a very specific ansatz tailored for the model at hand, with a small number of parameters. The first approach clearly has the potential to find a good optimum, e.g., using machine learning techniques, but it also has some disadvantages. In fact, for such a general approach the number of optimization parameters has to be increased as one increases the number of degrees of freedom of the system. This means that the cost of finding good contours might turn out to be prohibitive, similarly to what happens with methods based on Lefschetz thimbles. In this exploratory study we follow the second, ad hoc approach, and optimize ansätze with only a few parameters. Moreover, the number of these parameters is kept independent of the number of degrees of freedom of the system. We can then be sure that the optimization itself is numerically cheap, and that the per-configuration cost of generating the ensembles is essentially as low as on the original contours. Obviously, the drawback of this approach is that to write down an ansatz with only a few parameters that produces a substantial improvement in the severity of the sign problem, some physical or mathematical insight is needed.
254
For the toy model studied in this paper, the insight required to use the ad hoc approach is available, and so we can write down appropriate ansätze. We will then show that a quite cheap numerical optimization procedure leads one to contours with a reduced sign problem. We will also present numerical evidence that the reduction in the severity of the sign problem is exponential: while the sign problem on the optimized contours is still exponential in the number of degrees of freedom, the corresponding exponent is reduced. This conclusion is similar to what some of us have shown in Ref. [30] for a purely bosonic model (the 2+1 dimensional XY model at non-zero chemical potential). Notably, such an exponential reduction can be achieved without changing the number of optimization parameters with the system size.

In this work we will only consider phase-quenched simulations, for simplicity. Similar arguments and methods should, however, also apply to the sign-quenched case [30].
277
The plan of the paper is the following: In Section II we introduce the model discussed in this work. In Section III we provide details on the different contour deformation procedures we tested. In Section IV we illustrate the chemical potential and volume dependence of the achieved improvement and also compare our results with a method based on Lefschetz thimbles: the holomorphic flow of Ref. [11]. We summarize our conclusions in Section V.
286
II. THE CHIRAL RANDOM MATRIX MODEL

Throughout this paper we will only consider $N_f = 2$ with $\mu_1 = \mu_2 \equiv \mu$ for simplicity. The random matrix model of Stephanov [1] for $N_f$ degenerate flavors of quarks is then defined by the partition function
$Z_N^{N_f} = e^{N\mu^2} \int dW\, dW^\dagger \left(\det(D+m)\right)^{N_f} e^{-N \mathrm{Tr}\, W W^\dagger}$, (6)
where the massless Dirac matrix is
$D = \begin{pmatrix} 0 & iW + \mu \\ iW^\dagger + \mu & 0 \end{pmatrix}$, (7)
$m$ is the quark mass and $W$ is a general $N \times N$ complex matrix. The model has no concept of physical volume. The number of degrees of freedom of the model scales with $N^2$.
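As a concrete illustration of Eqs. (6)–(7), the following minimal NumPy sketch (ours, not the authors' code; the values of N and µ are arbitrary choices) draws one W consistent with the Gaussian weight $e^{-N\mathrm{Tr}WW^\dagger}$, builds the Dirac matrix, and inspects how its spectrum spreads into the complex plane at non-zero µ:

```python
import numpy as np

rng = np.random.default_rng(1)
N, mu = 200, 0.5
# e^{-N Tr W W^dagger} corresponds to complex entries with variance 1/N:
W = (rng.normal(size=(N, N)) + 1j * rng.normal(size=(N, N))) / np.sqrt(2 * N)

Z = np.zeros((N, N))
D = np.block([[Z, 1j * W + mu * np.eye(N)],
              [1j * W.conj().T + mu * np.eye(N), Z]])

ev = np.linalg.eigvals(D)
# At mu = 0 the spectrum is purely imaginary; at mu != 0 the eigenvalues
# acquire real parts (cf. the strip of width ~ mu^2 discussed below).
print(np.abs(ev.real).max())
```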
312
The two observables we will study in this paper are the chiral condensate,
$\Sigma = \frac{1}{2N} \frac{\partial \log Z_N^{N_f}}{\partial m}$, (8)
and the quark density,
$n = \frac{1}{2N} \frac{\partial \log Z_N^{N_f}}{\partial \mu}$. (9)
An important feature of the model is that it can be solved analytically, both in the $N \to \infty$ limit, where the integral is dominated by a saddle point, and at finite $N$, where it reduces to the calculation of moments of Gaussian integrals. Thus, in this particular model we will be able to compare numerical results with exact analytic solutions.
337
The model shares with QCD the feature that the phase-quenched theory corresponds to an isospin chemical potential, and has an analogue of the pion condensation transition at some $\mu = \mu_c^{\rm PQ}$. For chemical potentials exceeding $\mu_c^{\rm PQ}$ the sign problem of the model is severe. From the point of view of the Dirac spectrum, for $\mu = 0$ the eigenvalues are purely imaginary, while for $\mu \neq 0$ the eigenvalues of $D$ acquire a real part, and are distributed inside a strip of width $\mu^2$ in the real direction. When the quark mass is inside this strip, the model has a severe sign problem. This roughly corresponds to the analogue of the pion condensed phase in the phase-quenched theory. Due to these similarities, this model has been considered several times in the literature as a good toy model for the sign problem in QCD [34, 35].

In this model, unlike in QCD, the expectation value of the average phase does not always tend to zero in the limit of an infinite system. Rather, it only goes to zero in a given range of chemical potentials bounded by the solutions to the equation [38]:
$0 = 1 - \mu^2 + \frac{m^2}{\mu^2 - m^2} - \frac{m^2}{4(\mu^2 - m^2)^2}$. (10)
Using a quark mass of $m = 0.2$, the two solutions of this equation are $\mu = \mu_c^{\rm PQ} = 0.35$ and $\mu = 1.02$. This is the regime where the sign problem in the model is strongest.
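Eq. (10) can be checked numerically in a few lines; the following bisection sketch (an illustration we added, using only NumPy) recovers the two quoted solutions for m = 0.2 up to rounding:

```python
import numpy as np

m = 0.2

def f(mu):
    # right-hand side of Eq. (10)
    s = mu**2 - m**2
    return 1.0 - mu**2 + m**2 / s - m**2 / (4.0 * s**2)

def bisect(lo, hi, tol=1e-10):
    # plain bisection; assumes f changes sign exactly once on [lo, hi]
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        lo, hi = (mid, hi) if f(lo) * f(mid) > 0 else (lo, mid)
    return 0.5 * (lo + hi)

print(bisect(0.3, 0.5), bisect(0.9, 1.1))  # approx. 0.35 and 1.02
```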
374
III. CONTOUR DEFORMATION METHODS

A. Optimization method

We will restrict ourselves to ansätze with simple, analytically calculable Jacobians with $O(N^0)$ computational cost and a small number of parameters, independent of the number of degrees of freedom.

Let $A = \mathrm{Re}\, W$ and $B = \mathrm{Im}\, W$. These two real matrices will be deformed to complex matrices $\alpha$ and $\beta$. Thus,
$W = A + iB \to X = \alpha + i\beta$, $\quad W^\dagger = A^T - iB^T \to Y = \alpha^T - i\beta^T$. (11)
After applying such a deformation $X^\dagger \neq Y$. After the deformation, the severity of the sign problem is given by:
$\langle e^{i\theta} \rangle = \left\langle \left( \frac{\det(D+m)\,\det\mathcal{J}}{|\det(D+m)\,\det\mathcal{J}|} \right)^{N_f} e^{-iN \mathrm{Im}\,\mathrm{Tr}\, XY} \right\rangle$, (12)
where the Jacobian determinant is
$\det\mathcal{J} = \left| \frac{\partial(\alpha, \beta)}{\partial(A, B)} \right|$. (13)
404
B. Holomorphic flow

[Figure 1 appears here. Caption: Left: the average phase with Ansatz-1 as a function of k1 and k2 (m = 0.2, µ = 1.0, N = 2, Nf = 2). There is a local minimum at k2 ≈ 0 and k1 > 0. Right: the average phase with Ansatz-2 as a function of p1 and p2. There is an apparent saddle parallel to the p1 = 0 line at p1 = k1 > 0.]

Using the holomorphic flow (or generalized thimble method) of Ref. [11] for the complexified action of the Stephanov model,
$S = -N\mu^2 - N_f \log \det(D+m) + N \mathrm{Tr}(XY)$, (14)
we deform the integration manifold by evolving the original one with the differential equation
$\frac{dY_{ij}}{dt} = \overline{\frac{\partial S}{\partial Y_{ij}}} = \overline{N X_{ji} - N_f \left[ (XG)_{ji} + i\mu G_{ji} \right]}$, (15)
where the overbar denotes complex conjugation, $t$ is the flow parameter and
$G = \left( m^2 - \mu^2 - i\mu(X+Y) + YX \right)^{-1}$. (16)
Solving this system of equations with initial conditions $X_0 = W$, $Y_0 = W^\dagger$ for a fixed flow time $t_f$ we obtain a deformed manifold $\mathcal{M}_{t_f}$. We parametrize each point on the flowed manifold by the real matrices $A$ and $B$, i.e., we parametrize the flowed manifold by the initial conditions of the flow equation.
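For concreteness, a schematic Euler integration of the flow could look as follows. This is our own sketch, not the paper's implementation: in particular, the companion equation for X (obtained from ∂S/∂X in the same way as Eq. (15)) is our symmetric reconstruction from the action (14) and should be checked against it, and a real computation would use a higher-order integrator.

```python
import numpy as np

def flow(W, tf, N, Nf=2, m=0.2, mu=1.0, steps=100):
    """Integrate dY/dt = conj(dS/dY) (Eq. 15) and the analogous X equation."""
    X, Y = W.copy(), W.conj().T.copy()   # initial conditions X0 = W, Y0 = W^dagger
    I, dt = np.eye(N), tf / steps
    for _ in range(steps):
        G = np.linalg.inv((m**2 - mu**2) * I - 1j * mu * (X + Y) + Y @ X)  # Eq. (16)
        dSdY = N * X.T - Nf * ((X @ G).T + 1j * mu * G.T)
        dSdX = N * Y.T - Nf * ((G @ Y).T + 1j * mu * G.T)  # our reconstruction
        X = X + dt * np.conj(dSdX)
        Y = Y + dt * np.conj(dSdY)
    return X, Y
```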
497
The computation of expectation values requires the Jacobian of the holomorphic flow,
$\det J = \left| \frac{\partial(X, Y)}{\partial(A, B)} \right|$, (17)
as well. Denoting the Hessian with $H$, the Jacobian matrix $J$ is obtained as the solution of the equation
$\frac{dJ}{dt} = H J$, (18)
with initial conditions
$J_{X_{ij}, A_{ij}} = 1$, $J_{X_{ij}, B_{ij}} = i$, $J_{Y_{ij}, A_{ji}} = 1$, $J_{Y_{ij}, B_{ji}} = -i$. (19)
Computing the Jacobian directly is numerically expensive, so we estimate it [39] with
$W = \exp\left[ \int_0^T dt\, \mathrm{Tr}\, H(t) \right]$. (20)
The difference between $W$ and $\det J$ is taken into account by reweighting when computing observables,
$\langle O \rangle = \frac{\langle O e^{-\Delta S} \rangle_{S'_{\rm eff}}}{\langle e^{-\Delta S} \rangle_{S'_{\rm eff}}}$, (21)
where $S'_{\rm eff} = S - \ln W$, $\Delta S = S_{\rm eff} - \mathrm{Re}\, S'_{\rm eff}$ and $\langle \cdot \rangle_{S'_{\rm eff}}$ is the average with respect to $e^{-\mathrm{Re}\, S'_{\rm eff}}$. This way, we needed to compute $\det J$ exactly only for the configurations used for measurements.
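In code, the correction step of Eq. (21) is a one-line reweighting. A minimal sketch (ours; `obs` and `dS` are hypothetical arrays holding per-configuration measurements and action differences):

```python
import numpy as np

def reweighted_mean(obs, dS):
    """Eq. (21): <O> = <O e^{-dS}> / <e^{-dS}>, with dS = S_eff - Re S'_eff.
    dS may be complex, so the weights e^{-dS} carry a residual phase."""
    w = np.exp(-np.asarray(dS))
    return (np.asarray(obs) * w).mean() / w.mean()
```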
539
In the large flow time limit, the flowed manifold tends towards the Lefschetz thimbles. At smaller flow times, it still reduces the sign problem, although less than a complete thimble decomposition would.
543
IV. NUMERICAL RESULTS

A. Simple ansätze

As a rule, all of our ansätze have been parametrized such that the undeformed integration manifold is at value zero for all optimizable parameters.
550
[Figure 2 appears here. Caption: Left: parameters as a function of the optimization step for Ansatz-3 (real and imaginary parts of a, b, c, d, e, f, g, h, j, k; m = 0.2, µ = 1.0, N = 2, Nf = 2). Right: the average phase as a function of the optimization step for Ansatz-3.]
606
Ansatz-1

From the definition in Eq. (7) it is easy to see that the sign problem can be removed from the quark determinant by a simple shift of the form $\alpha = A + i\mu \mathbb{1}$. This, however, introduces a sign problem in the Gaussian term $e^{-N \mathrm{Tr}(XY)}$. By finding a trade-off between the two terms, the severity of the sign problem may be optimized. This motivates our first ansatz, with two real parameters $k_1$ and $k_2$, defined by
$\alpha = A + ik_1 \mathbb{1}$, (22)
$\beta = B + ik_2 \mathbb{1}$. (23)
The Jacobian determinant for this ansatz is simply unity. The parameter $k_2$ is introduced on a whim, as the matrices $A$ and $B$ do not have to be treated symmetrically. The results for the average phase in a scan in these two parameters for $N = 2$, $m = 0.2$ and $\mu = 1.0$ are shown in Fig. 1 (left). While there is a clearly non-zero optimal value for $k_1$, the optimal value of the $k_2$ parameter is near zero. This remains true for all values of the parameters $N$, $\mu$ and $m$ we simulated.
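The k1 scan can be mimicked with a rough Monte-Carlo estimator. The sketch below is our own illustration, not the paper's code: it reweights Gaussian samples of (A, B) to the Ansatz-1 contour. With det J = 1, a short calculation (ours) reduces the average phase of Eq. (12) to ⟨det(D+m)^{Nf} e^{−2iNk1 Tr A}⟩ / ⟨|det(D+m)^{Nf}|⟩ over the Gaussian ensemble, a constant factor cancelling between numerator and denominator. Importance sampling from the Gaussian rather than the phase-quenched ensemble is a simplification that is only reasonable at such small N.

```python
import numpy as np

rng = np.random.default_rng(2)
N, Nf, m, mu = 2, 2, 0.2, 1.0

def avg_phase(k1, nsamp=20_000):
    I, num, den = np.eye(N), 0.0 + 0.0j, 0.0
    for _ in range(nsamp):
        A = rng.normal(scale=1 / np.sqrt(2 * N), size=(N, N))
        B = rng.normal(scale=1 / np.sqrt(2 * N), size=(N, N))
        X = A + 1j * B + 1j * k1 * I          # alpha + i beta, Eqs. (22)-(23)
        Y = A.T - 1j * B.T + 1j * k1 * I      # alpha^T - i beta^T
        Dm = np.block([[m * I, 1j * X + mu * I], [1j * Y + mu * I, m * I]])
        det = np.linalg.det(Dm) ** Nf          # det(D + m)^{Nf}
        num += det * np.exp(-2j * N * k1 * np.trace(A))
        den += abs(det)
    return num / den

for k1 in (0.0, 0.2, 0.4):
    print(k1, abs(avg_phase(k1)))
```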
628
Ansatz-2

When we introduce a shift $A \to A + ik_1 \mathbb{1}$, the argument of the Gaussian term changes according to
$\mathrm{Tr}(XY) = \mathrm{Tr}(AA^T + BB^T) - Nk_1^2 + 2ik_1 \mathrm{Tr}\, A$. (24)
This motivates our second ansatz, with two real parameters $p_1$ and $p_2$, defined by
$\alpha = A + ip_1 \mathbb{1} + p_2 \mathrm{Tr}A\, \mathbb{1}$, (25)
$\beta = B$. (26)
The $p_1$ parameter of this ansatz is identical to the $k_1$ parameter of the previous ansatz. The Jacobian determinant for this ansatz is simply $\det\mathcal{J} = 1 + Np_2$, i.e., configuration-independent, and can be ignored. The results for the average phase in a scan in these two parameters for $N = 2$, $m = 0.2$ and $\mu = 1.0$ can be seen in Fig. 1 (right). While there is a clearly non-zero optimal value for $p_1 = k_1$, the $p_2$ parameter only appears to move on a saddle.
648
Ansatz-3

We now move on to a more complicated ansatz with 10 complex (or 20 real) parameters $a, b, c, d, e, f, g, h, j, k$, defined by
$\alpha = (a + b \mathrm{Tr}A + c \mathrm{Tr}B) \mathbb{1} + (1 + d)A + eB$, (27)
$\beta = (f + g \mathrm{Tr}A + h \mathrm{Tr}B) \mathbb{1} + jA + (1 + k)B$. (28)
The Jacobian determinant for this ansatz is
$\det\mathcal{J} = \left[ (1+d)(1+k) - ej \right]^{N^2 - 1} \left\{ \left[ (1+d) + Nb \right]\left[ (1+k) + Nh \right] - (e + Nc)(j + Ng) \right\}$. (29)
The severity of the sign problem was then optimized via the AdaDelta method [40], with the objective function
$-\log \langle e^{i\theta} \rangle = -\log \frac{Z}{Z_{\rm PQ}} = -\log Z + \log Z_{\rm PQ}$, (30)
where we suppressed the $N$ and $N_f$ indices of the partition function. The gradient with respect to the deformation parameters is given by
$\nabla \log Z_{\rm PQ} = -\langle \nabla S^a_{\rm eff} \rangle$, (31)
where
$S^a_{\rm eff} = N \mathrm{Re}\,\mathrm{Tr}\, XY - N_f \log |\det M| - \log |\det\mathcal{J}|$ (32)
with gradient
$\nabla S^a_{\rm eff} = N \mathrm{Re}\,\mathrm{Tr}\left[ (\nabla X) Y + X (\nabla Y) \right] - \frac{N_f}{2} \mathrm{Tr}\left[ M^{-1}(\nabla M) + \overline{M^{-1}(\nabla M)} \right] - \mathrm{Re}\left[ \frac{\nabla \det\mathcal{J}}{\det\mathcal{J}} \right]$. (33)
Note that for Ansatz-3 the Jacobian is independent of the configuration, and the last term can be dropped from Eq. (33). For Ansatz-4, to be discussed below, the Jacobian will depend on the configuration, and thus the last term is needed. An example of such an optimization run is shown in Fig. 2. As with the previous two ansätze, only a single parameter emerges: $k_1 = p_1 = \mathrm{Im}\, a$.
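The optimizer itself is standard; the following few lines (an illustrative sketch, with `grad` standing for a Monte-Carlo estimate of the gradient in Eq. (31)) implement one AdaDelta update [40] of the parameter vector:

```python
import numpy as np

def adadelta_step(theta, grad, Eg2, Edx2, rho=0.95, eps=1e-6):
    """One AdaDelta update; Eg2/Edx2 are running averages of g^2 and dx^2."""
    Eg2 = rho * Eg2 + (1 - rho) * grad**2
    dx = -np.sqrt(Edx2 + eps) / np.sqrt(Eg2 + eps) * grad
    Edx2 = rho * Edx2 + (1 - rho) * dx**2
    return theta + dx, Eg2, Edx2
```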
712
714
Ansatz-4

Experiments with the first three ansätze revealed only one parameter of interest, which can be thought of as a simple one-parameter imaginary shift of the trace of the matrix $A$. One might wonder whether more general deformations of the trace could lead to a better improvement. Thus we look at non-linear deformations of the trace $\tau = \mathrm{Tr}A$ of the matrix $A$, with an undeformed $B$ matrix. The integral measure is given by
$\prod_{i,j=1}^{N} dA_{ij} = d\tau \prod_{\substack{i,j=1 \\ (i,j) \neq (N,N)}}^{N} dA_{ij} = d\tau \prod_{\substack{i,j=1 \\ i \neq j}}^{N} dA_{ij} \prod_{k=1}^{N} d\left( A_{kk} - \frac{\tau}{N} \right)$. (34)
The deformed matrix $\alpha$ is obtained from $A$ as
$A = \frac{\tau}{N}\mathbb{1} + \left( A - \frac{\tau}{N}\mathbb{1} \right) = \frac{\tau}{N}\mathbb{1} + \tilde{A} \;\to\; \alpha = \frac{\tau}{N}\mathbb{1} + \tilde{A}$, (35)
where $\mathrm{Tr}\, \tilde{A} = 0$, the trace is complexified as
$\tau = t + if(\tau; \ldots)$, (36)
and $f$ is some function that depends on $\tau$ and possibly other parameters. For simplicity, we choose $f$ to be piecewise linear,
$f(\tau; x_{k(\tau)}, x_{k(\tau)+1}, y_{k(\tau)}, y_{k(\tau)+1}) = \frac{y_{k(\tau)} \left( x_{k(\tau)+1} - \tau \right)}{x_{k(\tau)+1} - x_{k(\tau)}} + \frac{y_{k(\tau)+1} \left( \tau - x_{k(\tau)} \right)}{x_{k(\tau)+1} - x_{k(\tau)}}$. (37)
The parameters to optimize are the $y_i$, while the node points $x_i$ of the linear interpolation are fixed parameters, chosen with regular spacing, $x_{l+1} - x_l = \Delta$ for all $l$, and
$k(\tau) = \mathrm{floor}\left( \frac{\tau - x_0}{\Delta} \right)$. (38)
By numerical experimentation we have found that the choice of the node points is not important, as long as the full interpolation range is large enough to cover the most probable values of $\mathrm{Tr}A$ on the original contours and $\Delta$ is small enough. If these conditions are met, optimal contours with ansätze with different node points appear to be piecewise approximations of the same smooth curve. The Jacobian is
$\det\mathcal{J} = 1 + i \frac{y_{k(\tau)+1} - y_{k(\tau)}}{\Delta}$. (39)
The parameters are then optimized as with Ansatz-3.
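A small sketch of the piecewise-linear deformation and its Jacobian factor, Eqs. (37)–(39); the node placement `x0` and spacing `delta` are arbitrary illustrative choices, and no bounds checking is done for τ outside the interpolation range:

```python
import numpy as np

x0, delta = -6.0, 0.5
y = np.zeros(26)   # node values y_i, the parameters to optimize

def k_of(tau):
    return int(np.floor((tau - x0) / delta))       # Eq. (38)

def f(tau):
    k = k_of(tau)
    t = (tau - (x0 + k * delta)) / delta           # fractional position in the cell
    return (1 - t) * y[k] + t * y[k + 1]           # Eq. (37), linear interpolation

def detJ(tau):
    k = k_of(tau)
    return 1 + 1j * (y[k + 1] - y[k]) / delta      # Eq. (39)
```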
797
[Figure 3 appears here. Caption: Ansatz-4 (piecewise optimization of the trace) compared to Ansatz-1 (imaginary constant shift of A proportional to the unit matrix), for N = 2, Nf = 2, m = 0.2, µ = 1.0. The two procedures find essentially the same contour, as the differing tails are at large values of |TrA|, and have small statistical weight.]

A comparison of the results from this ansatz with the constant shift found using ansätze 1 to 3 is shown in Fig. 3. For highly probable values of $\mathrm{Tr}A$ the two ansätze agree, while for highly improbable values of $\mathrm{Tr}A$ the optimization does not move the ansatz away from the original contour, as there are no configurations to use for the optimization of that part of the contour. These two asymptotic regimes are smoothly connected. The measured sign problem on this contour is identical to the one measured with ansätze 1 to 3, up to statistical errors – not surprisingly, since deviations of $f$ from a constant happen on unimportant configurations.
837
B. Chemical potential and matrix size dependence

Now that we have discovered a good contour deformation parameter, let us look at what kind of improvements can be achieved by such a one-parameter deformation. From here on we show results with Ansatz-1, with $k_2$ set to zero.

The volume and chemical potential dependence of the average phase for the original and optimized contours is shown in Fig. 4. The "volume", i.e., matrix size dependence at a fixed chemical potential in the left panel reveals an improvement of the sign problem that is exponential in the matrix size: while the severity of the sign problem is roughly linear on a logarithmic plot for both the original and optimized contours, the slopes are quite different. The right panel shows the chemical potential dependence for several values of $N$. Apparently, contour optimization improves the sign problem the most in the regime where it is the most severe.
856
The statistical improvement factor, defined as the square of the ratio of the average phase on the deformed vs. the original contours, $\left( \langle e^{i\theta} \rangle_{\rm def} / \langle e^{i\theta} \rangle_{\rm orig} \right)^2$, is shown in the left panel of Fig. 5 for $N = 2$, $4$ and $6$. For larger matrices, $\langle e^{i\theta} \rangle$ was zero within statistical errors on the original contours, and this ratio could not be calculated. We see that the ratio monotonically increases with $N$, and as a function of $\mu$ it is maximal close to the value of $\mu$ where the sign problem is the strongest. The optimal
877
[Figure 4 appears here. Caption: Left: dependence of the average phase on the size of the random matrix for the original and optimized contours (m = 0.2, µ = 0.9, Nf = 2). Right: dependence of the average phase on the chemical potential for the original and optimized contours (m = 0.2, Nf = 2; N = 2, 4, 6, 8).]
928
[Figure 5 appears here. Caption: Left: dependence of the statistical improvement (calculated as the square of the ratio of the average phases on the optimized and original contours) achieved by contour optimization as a function of µ for different matrix sizes. Right: dependence of the optimal contour parameter k1 = p1 = Im a on µ for different matrix sizes (m = 0.2, Nf = 2).]
979
values for the deformation parameter $k_1 = p_1 = \mathrm{Im}\, a$ for different values of $\mu$ and $N$ are shown in the right panel of Fig. 5.

As a sanity check, we also calculated the expectation value of the chiral condensate and the quark number on both the original and the optimized contours, and compared them to the analytic results, see Fig. 6. They both show excellent agreement, but the optimized contours have significantly smaller error bars.
988
C. Comparison with the holomorphic flow

As experiments with simple ansätze so far revealed only a single important contour deformation parameter, it is a natural question to ask whether Lefschetz-thimble based methods also "find" this deformation or not, and whether by utilizing such methods it is possible to improve the sign problem further compared to such a one-parameter deformation. For this reason, we performed the holomorphic flow on our $N = 2$ random matrices, and obtained an estimate of the $k_1$ parameter from the flowed variables via $k_1^{\rm flow} = \mathrm{Im}\, \langle \mathrm{Tr}(\alpha(t_f) - A) \rangle / N$. This $k_1$ can then be substituted back into the one-parameter ansatz $\alpha = A + ik_1 \mathbb{1}$, and the severity of the sign problem can be compared with the properly flowed manifold.
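Extracting this effective shift from flowed configurations is a one-liner; in the sketch below (ours; `alphas` holds the flowed α(tf) matrices and `As` the corresponding initial A matrices, both hypothetical names) it reads:

```python
import numpy as np

def k1_from_flow(alphas, As):
    """k1_flow = Im < Tr(alpha(tf) - A) > / N, averaged over configurations."""
    N = As[0].shape[0]
    return np.mean([np.trace(a - A0).imag for a, A0 in zip(alphas, As)]) / N
```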
1006
The sign problem as a function of $\mu$ is shown on the original contour, the optimized contour, the flowed contour, and on the contour with $k_1$ extracted from the flow in Fig. 7. A few observations can be drawn from this figure. For small chemical potentials, the flow performs better than the optimization, which does not noticeably improve the sign problem. For larger chemical potentials, optimization vastly outperforms the flow. Of course, this comparison is made only at a fixed flow time, and we do not know where the severity of the sign problem would end up at infinite flow time (on the thimbles). However, going to large flow times gets very expensive already for small systems.

For larger chemical potentials, the one-parameter ansatz with $k_1 = k_1^{\rm flow}$ extracted from the flow gives very similar results as the full flow. This may be a hint that at larger chemical potentials most of the improvement from the flow comes from this simple deformation. Interestingly, while the full flow at small chemical potentials gives a slightly weaker sign problem compared to the ansatz with $k_1^{\rm flow}$, at larger chemical potentials the situation is reversed: the sign problem is slightly weaker with $k_1^{\rm flow}$ than with the solution of the full flow equation. While this may be somewhat surprising at first, it is not
1036
[Figure 6 appears here. Caption: The chiral condensate (left) and the quark number (right) as a function of µ for several values of the matrix size N. Analytic results are compared with results from simulations on the original and on the improved contours.]
1100
in contradiction with what we already know about contour deformations. The flow goes towards the Lefschetz thimbles, which are not the numerically optimal contours, and thus there is no reason for the full flow curve to always lie above the curve with the simple ansatz with $k_1^{\rm flow}$.
1107
V. SUMMARY AND DISCUSSION

We have discussed contour deformations in the chiral random matrix model of Stephanov as a way to alleviate its sign problem. Using simple ad hoc ansätze we identified a single important deformation parameter, which allowed for an exponential reduction in the severity of the sign problem as a function of the matrix size.

Our results are quite encouraging, as they show that a simple one-parameter optimization can lead to exponentially alleviating the sign problem even in a fermionic theory, where the thimble decomposition is complicated and contour deformation approaches based on them might not be numerically effective. The fermionic nature of the matter fields does not appear to be a fundamental obstruction in the construction of exponentially better contours.
1125
[Figure 7 appears here. Caption: The severity of the sign problem for N = 2 and m = 0.2 as a function of µ on the original contours, the optimized contours, the flowed contours (tf = 0.04) and the contours where the k1 parameter of the ansatz is extracted from the flow.]

Furthermore, the phase diagram of the random matrix
1156
model is similar to what we expect in full QCD: the chiral phase transition is "hidden behind" the pion condensation phase in the phase-quenched theory. Hence, this bulk thermodynamic feature – the existence of a phase transition in the phase-quenched theory – also does not appear to be a fundamental obstruction.
1162
The results and the ansätze in this paper, however, cannot be used directly to construct a good optimization ansatz in full QCD, as the toy model studied here and QCD differ in an important technical aspect. Concretely, in the Stephanov model there are contour deformations that can remove the sign problem from the fermion determinant for a single flavor (so from the full determinant when all chemical potentials are equal) – albeit at the cost of reintroducing it somewhere else in the Boltzmann weights. There are no such deformations in full QCD. The complexification of the SU(3) gauge group is the SL(3, C) group, which still requires a unit determinant. To remove the chemical potential from a single quark determinant the time-like links would have to be deformed to GL(3, C) matrices, with non-unit determinant, which lie outside the complexified gauge group.
1178
Comparison with the holomorphic flow method shows that as one goes near the Lefschetz thimbles in this model, the bulk (but not all) of the improvement in the severity of the sign problem is captured by these types of deformations – which have no direct analogue in QCD. In the future it will therefore be important to work with more realistic toy models of QCD or even full QCD itself, as the choice of a suitable sign-problem improving ansatz appears to be strongly dependent on the exact symmetries and exact matter content of a given theory.
1188
Acknowledgements

This work was supported by the NKFIH grant KKP-126769. D.P. is supported by the ÚNKP-22-3 New National Excellence Program of the Ministry for Culture and Innovation from the source of the National Research, Development and Innovation Fund.
1194
[1] M. A. Stephanov, "Random matrix model of QCD at finite density and the nature of the quenched limit," Phys. Rev. Lett., vol. 76, pp. 4472–4475, 1996.
[2] S. Borsanyi, Z. Fodor, M. Giordano, S. D. Katz, D. Nogradi, A. Pasztor, and C. H. Wong, "Lattice simulations of the QCD chiral transition at real baryon density," Phys. Rev. D, vol. 105, no. 5, p. L051506, 2022.
[3] K. Nagata, "Finite-density lattice QCD and sign problem: Current status and open problems," Prog. Part. Nucl. Phys., vol. 127, p. 103991, 2022.
[4] M. Giordano, K. Kapás, S. D. Katz, D. Nógrádi, and A. Pásztor, "New approach to lattice QCD at finite density; results for the critical end point on coarse lattices," JHEP, vol. 05, p. 088, 2020.
[5] S. Borsanyi, Z. Fodor, M. Giordano, J. N. Guenther, S. D. Katz, A. Pasztor, and C. H. Wong, "Equation of state of a hot-and-dense quark gluon plasma: lattice simulations at real µB vs. extrapolations," 8 2022.
[6] A. Alexandru, G. Basar, P. F. Bedaque, and N. C. Warrington, "Complex paths around the sign problem," Rev. Mod. Phys., vol. 94, no. 1, p. 015006, 2022.
[7] M. Golterman, Y. Shamir, and B. Svetitsky, "Breakdown of staggered fermions at nonzero chemical potential," Phys. Rev. D, vol. 74, p. 071501, 2006.
[8] M. Giordano, K. Kapás, S. D. Katz, D. Nógrádi, and A. Pásztor, "Radius of convergence in lattice QCD at finite µB with rooted staggered fermions," Phys. Rev. D, vol. 101, no. 7, p. 074511, 2020.
[9] M. Cristoforetti, F. Di Renzo, and L. Scorzato, "New approach to the sign problem in quantum field theories: High density QCD on a Lefschetz thimble," Phys. Rev. D, vol. 86, p. 074506, 2012.
[10] M. Cristoforetti, F. Di Renzo, A. Mukherjee, and L. Scorzato, "Monte Carlo simulations on the Lefschetz thimble: Taming the sign problem," Phys. Rev. D, vol. 88, no. 5, p. 051501, 2013.
[11] A. Alexandru, G. Başar, P. F. Bedaque, G. W. Ridgway, and N. C. Warrington, "Sign problem and Monte Carlo calculations beyond Lefschetz thimbles," JHEP, vol. 05, p. 053, 2016.
[12] M. Fukuma and N. Matsumoto, "Worldvolume approach to the tempered Lefschetz thimble method," PTEP, vol. 2021, no. 2, p. 023B08, 2021.
[13] F. Di Renzo, S. Singh, and K. Zambello, "Taylor expansions on Lefschetz thimbles," Phys. Rev. D, vol. 103, no. 3, p. 034513, 2021.
[14] F. Di Renzo and K. Zambello, "Solution of the Thirring model in thimble regularization," Phys. Rev. D, vol. 105, no. 5, p. 054501, 2022.
[15] T. Kanazawa and Y. Tanizaki, "Structure of Lefschetz thimbles in simple fermionic systems," JHEP, vol. 03, p. 044, 2015.
[16] Y. Tanizaki, Y. Hidaka, and T. Hayata, "Lefschetz-thimble analysis of the sign problem in one-site fermion model," New J. Phys., vol. 18, no. 3, p. 033002, 2016.
[17] F. Di Renzo and G. Eruzzi, "One-dimensional QCD in thimble regularization," Phys. Rev. D, vol. 97, no. 1, p. 014503, 2018.
[18] K. Zambello and F. Di Renzo, "Towards Lefschetz thimbles regularization of heavy-dense QCD," PoS, vol. LATTICE2018, p. 148, 2018.
[19] M. Ulybyshev, C. Winterowd, and S. Zafeiropoulos, "Lefschetz thimbles decomposition for the Hubbard model on the hexagonal lattice," Phys. Rev. D, vol. 101, no. 1, p. 014508, 2020.
[20] S. Lawrence, "Beyond Thimbles: Sign-Optimized Manifolds for Finite Density," PoS, vol. LATTICE2018, p. 149, 2018.
[21] Y. Mori, K. Kashiwa, and A. Ohnishi, "Toward solving the sign problem with path optimization method," Phys. Rev. D, vol. 96, no. 11, p. 111501, 2017.
[22] F. Bursa and M. Kroyter, "A simple approach towards the sign problem using path optimisation," JHEP, vol. 12, p. 054, 2018.
[23] K. Kashiwa, Y. Mori, and A. Ohnishi, "Controlling the model sign problem via the path optimization method: Monte Carlo approach to a QCD effective model with Polyakov loop," Phys. Rev. D, vol. 99, no. 1, p. 014033, 2019.
[24] Y. Mori, K. Kashiwa, and A. Ohnishi, "Path optimization in 0+1D QCD at finite density," PTEP, vol. 2019, no. 11, p. 113B01, 2019.
[25] Y. Mori, K. Kashiwa, and A. Ohnishi, "Application of a neural network to the sign problem via the path optimization method," PTEP, vol. 2018, no. 2, p. 023B04, 2018.
[26] A. Alexandru, P. F. Bedaque, H. Lamm, and S. Lawrence, "Finite-Density Monte Carlo Calculations on Sign-Optimized Manifolds," Phys. Rev. D, vol. 97, no. 9, p. 094510, 2018.
[27] A. Alexandru, P. F. Bedaque, H. Lamm, S. Lawrence, and N. C. Warrington, "Fermions at Finite Density in 2+1 Dimensions with Sign-Optimized Manifolds," Phys. Rev. Lett., vol. 121, no. 19, p. 191602, 2018.
[28] F. Bursa and M. Kroyter, "Optimisation of complex integration contours at higher order," JHEP, vol. 04, p. 181, 2021.
[29] K. Kashiwa and Y. Mori, "Path optimization for U(1) gauge theory with complexified parameters," Phys. Rev. D, vol. 102, no. 5, p. 054519, 2020.
[30] M. Giordano, K. Kapas, S. D. Katz, A. Pasztor, and Z. Tulipant, "Exponential reduction of the sign problem at finite density in the 2+1D XY model via contour deformations," Phys. Rev. D, vol. 106, no. 5, p. 054512, 2022.
[31] G. Parisi, "On complex probabilities," Phys. Lett. B, vol. 131, pp. 393–395, 1983.
[32] G. Aarts, E. Seiler, and I.-O. Stamatescu, "The Complex Langevin method: When can it be trusted?," Phys. Rev. D, vol. 81, p. 054508, 2010.
[33] E. Seiler, D. Sexty, and I.-O. Stamatescu, "Gauge cooling in complex Langevin for QCD with heavy quarks," Phys. Lett. B, vol. 723, pp. 213–216, 2013.
[34] J. Bloch, J. Glesaaen, J. J. M. Verbaarschot, and S. Zafeiropoulos, "Complex Langevin Simulation of a Random Matrix Model at Nonzero Chemical Potential," JHEP, vol. 03, p. 015, 2018.
[35] M. Fukuma, N. Matsumoto, and Y. Namekawa, "Numerical sign problem and the tempered Lefschetz thimble method," PoS, vol. CORFU2021, p. 254, 2022.
[36] R. H. Swendsen and J.-S. Wang, "Replica Monte Carlo simulation of spin-glasses," Phys. Rev. Lett., vol. 57, pp. 2607–2609, Nov 1986.
[37] M. Fukuma and N. Umeda, "Parallel tempering algorithm for integration over Lefschetz thimbles," PTEP, vol. 2017, no. 7, p. 073B01, 2017.
[38] J. Han and M. A. Stephanov, "A Random Matrix Study of the QCD Sign Problem," Phys. Rev. D, vol. 78, p. 054507, 2008.
[39] A. Alexandru, G. Basar, P. F. Bedaque, G. W. Ridgway, and N. C. Warrington, "Fast estimator of Jacobians in the Monte Carlo integration on Lefschetz thimbles," Phys. Rev. D, vol. 93, no. 9, p. 094514, 2016.
[40] M. D. Zeiler, "ADADELTA: An Adaptive Learning Rate Method," 2012.
OtFOT4oBgHgl3EQf3zQR/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
UNAyT4oBgHgl3EQfVvd8/content/tmp_files/2301.00149v1.pdf.txt ADDED
@@ -0,0 +1,2351 @@
Rethinking Rotation Invariance with Point Cloud Registration
Jianhui Yu, Chaoyi Zhang, Weidong Cai
School of Computer Science, University of Sydney, Australia
{jianhui.yu, chaoyi.zhang, tom.cai}@sydney.edu.au

Abstract

Recent investigations on rotation invariance for 3D point clouds have been devoted to devising rotation-invariant feature descriptors or learning canonical spaces where objects are semantically aligned. Examinations of learning frameworks for invariance have seldom been looked into. In this work, we review rotation invariance in terms of point cloud registration and propose an effective framework for rotation invariance learning via three sequential stages, namely rotation-invariant shape encoding, aligned feature integration, and deep feature registration. We first encode shape descriptors constructed with respect to reference frames defined over different scales, e.g., local patches and global topology, to generate rotation-invariant latent shape codes. Within the integration stage, we propose Aligned Integration Transformer to produce a discriminative feature representation by integrating point-wise self- and cross-relations established within the shape codes. Meanwhile, we adopt rigid transformations between reference frames to align the shape codes for feature consistency across different scales. Finally, the deep integrated feature is registered to both rotation-invariant shape codes to maximize feature similarities, such that rotation invariance of the integrated feature is preserved and shared semantic information is implicitly extracted from shape codes. Experimental results on 3D shape classification, part segmentation, and retrieval tasks prove the feasibility of our work. Our project page is released at: https://rotation3d.github.io/.

1 Introduction

Point cloud analysis has recently drawn much interest from researchers. As a common form of 3D representation, the growing presence of point cloud data is encouraging the development of many deep learning methods (Qi et al. 2017a; Guo et al. 2021; Zhang et al. 2021), showing great success for well-aligned point clouds on different tasks. However, it is difficult to directly apply 3D models to real data, as raw 3D objects are normally captured at different viewing angles, resulting in unaligned data samples, which inevitably impact deep learning models that are sensitive to rotations. Therefore, rotation invariance becomes an important research topic in the 3D domain.

[Figure 1 appears here. Caption: Frameworks of our design (left) and robust point cloud registration (right), where TI and RI are transformation invariance and rotation invariance, and T is the rigid transformation. The dotted line indicates the computation of T between reference frames.]

To achieve rotation invariance, a straightforward way is to augment training data with massive rotations, which, however, requires a large memory capacity and exhibits limited generalization ability to unseen data (Kim, Park, and Han 2020). There are attempts to align 3D inputs to a canonical pose (Jaderberg et al. 2015; Cohen et al. 2018), or to learn rotation robust features via equivariance (Deng et al. 2021; Luo et al. 2022), while these methods are not rigorously rotation-invariant and present noncompetitive performance on 3D shape analysis. To maintain consistent model behavior under random rotations, some methods (Zhang et al. 2019; Chen et al. 2019; Xu et al. 2021) follow Drost et al. (2010) to handcraft rotation-invariant point-pair features. Others (Zhang et al. 2020; Li et al. 2021a; Zhao et al. 2022) design robust features from equivariant orthonormal bases.

Most of the mentioned works either manipulate model inputs or generate canonical spaces to achieve rotation invariance (RI). In this work, we review the problem of RI from a different aspect: robust point cloud registration (PCR). We find that PCR and RI share the same goal: PCR aligns low-dimensional point cloud features (e.g., xyz) from the source domain to the target domain regardless of transformations, while RI can be considered to align high-dimensional latent features to rotation-invariant features. Specifically, the goal of PCR is to explicitly align the source point cloud to the target, both representing the same 3D object; for RI learning, we implicitly align the final feature representation of a 3D shape to a hidden feature of the same shape, which is universally rotation-invariant to any rotations.

Motivated by this finding, we propose our learning framework in Fig. 1 with three sequential stages, namely rotation-invariant shape encoding, aligned feature integration, and deep feature registration. Firstly, we (a) construct and feed point pairs with different scales as model inputs, where we consider local patches Pℓ with a small number of points and the global shape Pg with the whole set of 3D points. Hence, the final feature representation can be enriched by information from different scales. Low-level rotation-invariant descriptors are thus built on reference frames and encoded to generate latent shape codes Fℓ and Fg, following recent PCR work (Pan, Cai, and Liu 2022). Secondly, we (b) introduce a variant of the transformer (Vaswani et al. 2017), Aligned Integration Transformer (AIT), to implicitly integrate information from both self- and cross-attention branches for effective feature integration. In this way, information encoded from different point scales is aggregated to represent the same 3D object. Moreover, we consider Fℓ and Fg as unaligned, since they are encoded from unaligned reference frames. To address the problem, we follow the evaluation technique proposed in PCR (Pan, Cai, and Liu 2022), where we use relative rotation information (T) with learnable layers to align Fℓ and Fg for feature consistency. Finally, to ensure RI of the integrated feature U, we follow PCR to (c) examine the correspondence map of (Fg, U) and (Fℓ, U), such that the mutual information between a local patch of a 3D object and the whole 3D object is maximized, and RI is further ensured in the final geometric feature.

The contributions of our work are threefold: (1) To our knowledge, we are the first to develop a PCR-cored representation learning framework towards effective RI studies on 3D point clouds. (2) We introduce Aligned Integration Transformer (AIT), a transformer-based architecture to conduct aligned feature integration for a comprehensive geometry study from both local and global scales. (3) We propose a registration loss to maintain rotation invariance and discover semantic knowledge shared in different parts of the input object. Moreover, the feasibility of our proposed framework is successfully demonstrated on various 3D tasks.

2 Related Work

Rotation Robust Feature Learning. Networks that are robust to rotations can be equivariant to rotations. Esteves et al. (2018) and Cohen et al. (2018) project 3D data into a spherical space for rotation equivariance and perform convolutions in terms of spherical harmonic bases. Some (Spezialetti et al. 2020; Sun et al. 2021) learn canonical spaces to unify the pose of point clouds. Recent works (Luo et al. 2022; Deng et al. 2021; Jing et al. 2020) vectorize the scalar activations and map SO(3) actions to a latent space for easy manipulation. Although these works present competitive results, they cannot be strictly rotation-invariant. Another way to achieve rotation robustness is to learn rotation-invariant features. Handcrafted point-pair features are rotation-invariant (Zhang et al. 2019; Chen et al. 2019; Xu et al. 2021), but they focus on local domains and ignore the global overview of 3D objects. Others use rotation-equivariant local reference frames (LRFs) (Zhang et al. 2020; Thomas 2020; Kim, Park, and Han 2020) or global reference frames (GRFs) (Li et al. 2021a) as model inputs based on principal component analysis (PCA). However, they may produce inconsistent features across different reference frames, which would limit the representational power. In contrast to the abovementioned methods with rotation robust model inputs or modules, we examine the relation between RI and PCR and propose an effective framework.

3D Robust Point Cloud Registration. Given a pair of LiDAR scans, 3D PCR requires an optimal rigid transformation to best align the two scans. Despite the recent emergence of ICP-based methods (Besl and McKay 1992; Wang and Solomon 2019b), we follow robust correspondence-based approaches in our work (Deng, Birdal, and Ilic 2018; Yuan et al. 2020; Qin et al. 2022; Pan, Cai, and Liu 2022), where RI is widely used to mitigate the impact of geometric transformations during feature learning. Specifically, both Pan, Cai, and Liu (2022) and Qin et al. (2022) analyze the encoding of transformation-robust information and introduce a rotation-invariant module with contextual information into their registration pipeline. All these methods showing impressive results are closely related to rotation invariance. We hypothesize that the learning framework of RI can be similar to PCR, and we further prove in experiments that our network is feasible and able to achieve competitive performance on rotated point clouds.

Transformers in 3D Point Clouds. Transformers (Dosovitskiy et al. 2021; Liu et al. 2021) applied to 2D vision have shown great success, and they are gaining prominence in 3D point clouds. For example, Zhao et al. (2021) uses vectorized self-attention (Vaswani et al. 2017) and positional embedding for 3D modeling. Guo et al. (2021) proposes offset attention for noise-robust geometric representation learning. Cross-attention is widely employed for semantic information exchange (Qin et al. 2022; Yu et al. 2021a), where feature relations between the source and target domains are explored. Taking advantage of both, we design a simple yet effective feature integration module with self and cross relations. In addition, transformation-related embeddings are introduced for consistent feature learning.

Contrastive Learning with 3D Visual Correspondence. Based on visual correspondence, contrastive learning aims to train an embedding space where positive samples are pushed together whereas negative samples are separated away (He et al. 2020). The definition of positivity and negativity follows the visual correspondence maps, where pairs with high confidence scores are positive, otherwise negative. Visual correspondence is important in 3D tasks, where semantic information extracted from matched point pairs improves the network's understanding of 3D geometric structures. For example, PointContrast (Xie et al. 2020) explores feature correspondence across multiple views of one 3D point cloud with the InfoNCE loss (Van den Oord, Li, and Vinyals 2018), increasing the model performance on downstream tasks. Info3D (Sanghi 2020) and CrossPoint (Afham et al. 2022) minimize the semantic difference of point features under different poses. We follow the same idea by registering the deep features to rotation-invariant features at intermediate levels, increasing feature similarities in the embedding space to ensure rotation invariance.

3 Method

Given a 3D point cloud including $N_{in}$ points with xyz coordinates $P = \{p_i \in \mathbb{R}^3\}_{i=1}^{N_{in}}$, we aim to learn a shape encoder $f$ that is invariant to 3D rotations: $f(P) = f(RP)$, where $R \in SO(3)$ and $SO(3)$ is the rotation group. RI can be investigated and achieved through three stages, namely rotation-invariant shape encoding (Section 3.1), aligned feature integration (Section 3.2), and deep feature registration (Section 3.3).

3.1 Rotation-Invariant Shape Encoding

In this section, we first construct the input point pairs from local and global scales based on reference frames, following the idea of Pan, Cai, and Liu (2022) to obtain low-level rotation-invariant shape descriptors from LRFs and the GRF directly. Then we obtain latent shape codes via two set abstraction layers as in PointNet++ (Qi et al. 2017b).

Rotation Invariance for Local Patches. To construct rotation-invariant features on LRFs, we hope to construct an orthonormal basis for each LRF as $M^\ell_i \in \mathbb{R}^{3\times3}$. Given a point $p_i$ and its neighbor $p_j \in \mathcal{N}(p_i)$, we choose $\vec{x}^\ell_i = \vec{p_m p_i} / \|\vec{p_m p_i}\|_2$, where $p_m$ is the barycenter of the local geometry and $\|\cdot\|_2$ is the L2-norm. We then define $\vec{z}^\ell_i$, following Tombari, Salti, and Stefano (2010), to have the same direction as the eigenvector corresponding to the smallest eigenvalue obtained via eigenvalue decomposition (EVD):
$\Sigma^\ell_i = \sum_{j=1}^{|\mathcal{N}(p_i)|} \alpha_j (\vec{p_i p_j})(\vec{p_i p_j})^\top, \quad \alpha_j = \frac{d - \|\vec{p_i p_j}\|_2}{\sum_{j=1}^{|\mathcal{N}(p_i)|} d - \|\vec{p_i p_j}\|_2}$, (1)
where $\alpha_j$ is a weight parameter, allowing nearby $p_j$ to have a large contribution to the covariance matrix, and $d$ is the maximum distance between $p_i$ and $p_j$. Finally, we define $\vec{y}^\ell_i$ as $\vec{z}^\ell_i \times \vec{x}^\ell_i$. RI is introduced to $p_i$ with respect to its neighbor $p_j$ as $p^\ell_{ij} = \vec{p_i p_j}^\top M^\ell_i$. Proofs of the equivariance of $M^\ell$
301
+ i. Proofs of the equivariance of Mℓ
302
+ i
303
+ and invariance of pℓ
304
+ ij are shown in the supplementary ma-
305
+ terial. The latent shape code Fℓ ∈ RN×C is obtained via
306
+ PointNet++ and max-pooling.
307
+ Rotation Invariance for Global Shape.
308
+ We apply PCA
309
+ as a practical tool to obtain RI in a global scale. Similar to
310
+ Eq. 1, PCA is performed by
311
+ 1
312
+ N0
313
+ �N0
314
+ i=1(# »
315
+ pmpi)(# »
316
+ pmpi)⊤ =
317
+ UgΛgUg⊤, where pm is the barycenter of P, Ug
318
+ =
319
+ [# »
320
+ u1g, # »
321
+ u2g, # »
322
+ u3g] and Λg = diag(λg
323
+ 1, λg
324
+ 2, λg
325
+ 3) are eigenvec-
326
+ tor and eigenvalue matrices. We take Ug as the orthonor-
327
+ mal basis Mg = [#»x g, #»y g, #»z g] for GRF. By transform-
328
+ ing point pi with Ug, the shape pose is canonicalized as
329
+ pg
330
+ i = piMg. Proof of the RI of pg
331
+ i is omitted for its sim-
332
+ plicity, and Fg ∈ RN×C is obtained following PointNet++.
333
+ Sign Ambiguity.
334
+ EVD introduces sign ambiguity for
335
+ eigenvectors, which negatively impacts the model perfor-
336
+ mance (Bro, Acar, and Kolda 2008). The description of sign
337
+ ambiguity states that for a random eigenvector #»u, #»u and
338
+ #»u ′, with #»u ′ having an opposite direction to #»u, are both
339
+ acceptable solutions to EVD. To tackle this issue, we sim-
340
+ ply force #»
341
+ ziℓ of LRF to follow the direction of # »
342
+ opi, with o
343
+ being the origin of the world coordinate. We disambiguate
344
+ basis vectors in Mg by computing an inner product with
345
+ # »
346
+ pmpi, ∀i ∈ N0. Taking #»x g for example, its direction is con-
347
+ ditioned on the following term:
348
+ #»x g =
349
+ �#»x g,
350
+ if Sx ≥ N0
351
+ 2
352
+ #»x ′g,
353
+ otherwise
354
+ ,
355
+ Sx =
356
+ N0
357
+
358
+ i=1
359
+ 1[⟨#»x g, # »
360
+ pmpi⟩],
361
+ (2)
362
+ where ⟨·, ·⟩ is the inner product, 1[·] is a binary indicator that
363
+ returns 1 if the input argument is positive, otherwise 0. Sx
364
+ denotes the number of points where #»x g and # »
365
+ pmpi point to
366
+ the same direction. The same rule is applied to disambiguate
367
+ #»y g and #»z g by Sy and Sz. Besides, as mentioned in Li et al.
368
+ (2021a), Mg might be non-rotational (e.g., reflection). To
369
+ ensure Mg a valid rotation, we simply reverse the direction
370
+ of the basis vector whose S value is the smallest. More anal-
371
+ yses on sign ambiguity are in the supplementary material.
372
+ 3.2
373
+ Aligned Feature Integration
374
+ Transformer has been widely used in 3D domain to cap-
375
+ ture long-range dependencies (Yu et al. 2021b). In this sec-
376
+ tion, we introduce Aligned Integration Transformer (AIT),
377
+ an effective transformer to align latent shape codes with rel-
378
+ ative rotation angles and integrate information via attention-
379
+ based integration (Cheng et al. 2021). Within each AIT mod-
380
+ ule, we first apply Intra-frame Aligned Self-attention on Fℓ
381
+ and we do not encode Fg, which is treated as supplemen-
382
+ tary information to assist local geometry learning with the
383
+ global shape overview. We discuss that encoding Fg via self-
384
+ attention can increase model overfitting, thus lowering the
385
+ model performance. We will validate our discussion in Sec-
386
+ tion 4.4. Inter-frame Aligned Cross-attention is applied on
387
+ both Fℓ and Fg, and we use Attention-based Feature Inte-
388
+ gration module for information Aggregation.
389
+ Preliminary: Offset Attention.
390
+ AIT utilizes offset atten-
391
+ tion (Guo et al. 2021) for noise robustness. In the follow-
392
+ ing, we use subscripts sa and ca to denote implementations
393
+ related to self- and cross-attention, respectively. We first re-
394
+ view offset attention as follows:
395
+ F = φ(Foa) + Fin, Foa = Fin − ∥softmax(A)∥1v, A = qk⊤,
396
+ (3)
397
+ where q = FinWq, k = FinWk ∈ RN×d, and v =
398
+ FinWv ∈ RN×C are query, key, and value embeddings,
399
+ and Wq, Wk ∈ RC×d, Wv ∈ RC×C are the correspond-
400
+ ing projection matrices. ∥ · ∥1 is L1-norm and φ denotes a
401
+ multi-layer perceptron (MLP). Foa is offset attention-related
402
+ feature and A ∈ RN×N is the attention logits.
403
+
404
+ 𝑾𝒄𝒂
405
+ 𝒗
406
+ 𝑾𝒄𝒂
407
+ 𝒌
408
+ 𝑾𝒔𝒂
409
+ 𝒒
410
+ 𝐞𝒄𝒂
411
+ 𝜶 𝑾𝒄𝒂
412
+ 𝜶
413
+ 𝑭()
414
+ ℓ/,
415
+ 𝐹ℓ/,
416
+ 𝑨𝒄𝒂
417
+ 𝒂𝒕𝒕𝒏: 𝑁×𝑁
418
+ 𝑨𝒄𝒂
419
+ 𝒓𝒐𝒕: 𝑁×𝑁
420
+ 𝑨𝒄𝒂: 𝑁×𝑁
421
+ 𝐹,/ℓ
422
+ 𝑾𝒔𝒂
423
+ 𝒗
424
+ 𝑾𝒔𝒂
425
+ 𝒌
426
+ 𝑾𝒔𝒂
427
+ 𝒒
428
+ 𝐤𝒔𝒂: 𝑁×𝑑
429
+ 𝐪𝒔𝒂: 𝑁×𝑑
430
+ 𝐯𝒔𝒂: 𝑁×𝐶
431
+ 𝐞𝒔𝒂
432
+ 𝜶 𝑾𝒔𝒂
433
+ 𝜶
434
+ 𝑭()
435
+ ℓ/,
436
+ 𝐹ℓ/,
437
+ 𝑨𝒔𝒂
438
+ 𝒂𝒕𝒕𝒏: 𝑁×𝑁
439
+ 𝑨𝒔𝒂
440
+ 𝒓𝒐𝒕: 𝑁×𝑁
441
+ 𝑨1): 𝑁×𝑁
442
+ 𝑁×𝑁×𝑑
443
+ 𝑁×𝑑
444
+ 𝐯𝒄𝒂: 𝑁×𝐶
445
+ 𝐤𝒔𝒂: 𝑁×𝑑
446
+ 𝐪𝒄𝒂: 𝑁×𝑑
447
+ (a) Intra-frame Aligned Self-attention
448
+ (b) Inter-frame Aligned Cross-attention
449
+ 𝐚𝐝𝐝𝐢𝐭𝐢𝐨𝐧
450
+ 𝐦𝐚𝐭𝐫𝐢𝐱 𝐩𝐫𝐨𝐝𝐮𝐜𝐭
451
+ 𝐬𝐮𝐛𝐭𝐫𝐚𝐜𝐭𝐢𝐨𝐧
452
+ 𝐚𝐝𝐝𝐢𝐭𝐢𝐨𝐧
453
+ 𝐦𝐚𝐭𝐫𝐢𝐱 𝐩𝐫𝐨𝐝𝐮𝐜𝐭
454
+ 𝐬𝐮𝐛𝐭𝐫𝐚𝐜𝐭𝐢𝐨𝐧
455
+ Figure 2: Illustrations of (a) Intra-frame Aligned Self-attention and (b) Inter-frame Aligned Cross-attention modules. Note that
456
+ we only present processes for computing Foa in both modules.
457
+ Intra-frame Aligned Self-attention.
458
+ Point-wise features
459
+ of Fℓ are encoded from unaligned LRFs, so direct imple-
460
+ mentation of self-attention on Fℓ can cause feature inconsis-
461
+ tency during integration. To solve this problem, rigid trans-
462
+ formations between distinct LRFs are considered, which
463
+ are explicitly encoded and injected into point-wise relation
464
+ learning process. We begin by understanding the transfor-
465
+ mation between two LRFs. For any pair of local orthonormal
466
+ bases Mℓ
467
+ i and Mℓ
468
+ j, a rotation can be easily derived ∆Rji =
469
+ Mℓ
470
+ iMℓ
471
+ j
472
+ ⊤ and translation is defined as ∆tji = oℓ
473
+ i −oℓ
474
+ j, where
475
+ oℓ
476
+ i/j indicates the origin. In our work, the translation part is
477
+ intentionally ignored, where we show in the supplementary
478
+ material that by keeping both rotation and translation infor-
479
+ mation, the model performance decreases.
480
+ Although ∆Rji is invariant to rotations, we do not di-
481
+ rectly project it into the embedding space, as it is sensitive
482
+ to the order of matrix product: ∆Rji ̸= ∆Rij, giving in-
483
+ consistent rotation information when the product order is not
484
+ maintained. To address this issue, we construct our embed-
485
+ ding via the relative rotation angle ∆αji between Mℓ
486
+ i and
487
+ Mℓ
488
+ j, which is normally used in most PCR works (Yew and
489
+ Lee 2020; Pan, Cai, and Liu 2022) for evaluations. The rel-
490
+ ative rotation angle ∆αji is computed as:
491
+ ∆αji = arccos
492
+ �Trace (∆Rji) − 1
493
+ 2
494
+ � 180
495
+ π
496
+ ∈ [0, π],
497
+ (4)
498
+ where it is easy to see that ∆αji = ∆αij. We further apply
499
+ sinusoidal functions on ∆αji to generate N 2 pairs of angu-
500
+ lar embeddings eα ∈ RN×N×d for all N points as:
501
+
502
+ i,j,2k = sin
503
+ � ∆αji/tα
504
+ 100002k/d
505
+
506
+ , eα
507
+ i,j,2k+1 = cos
508
+ � ∆αji/tα
509
+ 100002k/d
510
+
511
+ ,
512
+ (5)
513
+ where tα controls the sensitivity to angle variations.
514
+ Finally, we inject eα into offset attention and learn intra-
515
+ frame aligned feature Fℓ
516
+ IAS via self-attention as follows:
517
+ Fℓ
518
+ IAS = φ
519
+
520
+ Fℓ
521
+ oa
522
+
523
+ + Fℓ, Fℓ
524
+ oa = Fℓ − ∥ softmax(Asa)∥1vsa,
525
+ Asa = Aattn
526
+ sa
527
+ + Arot
528
+ sa , Aattn
529
+ sa
530
+ = qsak⊤
531
+ sa, Arot
532
+ sa = qsa(eα
533
+ saWα
534
+ sa)⊤,
535
+ (6)
536
+ where qsa/ksa/vsa = FℓWq
537
+ sa/FlWk
538
+ sa/FlWv
539
+ sa, Wα
540
+ sa ∈
541
+ Rd×d is a linear projection to refine the learning of eα
542
+ sa, and
543
+ Asa is the attention logits. The same process can be per-
544
+ formed for Fg by swapping the index ℓ and g. Detailed il-
545
+ lustrations are shown in Fig. 2 (a).
546
+ Inter-frame Aligned Cross-attention.
547
+ Semantic infor-
548
+ mation exchange between Fℓ and Fg in the feature space
549
+ is implemented efficiently by cross-attention (Chen, Fan,
550
+ and Panda 2021). Since Fℓ and Fg are learned from differ-
551
+ ent coordinate systems, inter-frame transformations should
552
+ be considered for cross-consistency between Fℓ and Fg.
553
+ An illustration of the cross-attention module is shown in
554
+ Fig. 2 (b). Computation of inter-frame aligned feature Fℓ
555
+ IAC
556
+ via cross-attention follows a similar way as Eq. 6:
557
+ Fℓ
558
+ IAC = φ
559
+
560
+ Fℓ
561
+ oa
562
+
563
+ + Fℓ, Fℓ
564
+ oa = Fℓ − ∥ softmax(Aca)∥1vca,
565
+ Aca = Aattn
566
+ ca
567
+ + Arot
568
+ ca , Aattn
569
+ ca
570
+ = qcak⊤
571
+ ca, Arot
572
+ ca = qca(eα
573
+ caWα
574
+ ca)⊤,
575
+ (7)
576
+ where qca/kca/vca = FℓWq
577
+ ca/FgWk
578
+ ca/FgWv
579
+ ca. Aca is
580
+ cross-attention logits containing point-wise cross-relations
581
+ over point features defined across local and global scales.
582
+
583
+ ca ∈ RN×d is computed via Eq. 4 and Eq. 5 in terms of the
584
+ transformation between Mℓ
585
+ i and Mg. To this end, the geo-
586
+ metric features learned between local and global reference
587
+ frames can be aligned given eα
588
+ ca, leading to a consistent fea-
589
+ ture representation.
590
+
591
+ Attention-based Feature Integration.
592
+ Instead of simply
593
+ adding the information from both Fℓ and Fg, we integrate
594
+ information by incrementing attention logits. Specifically,
595
+ we apply self-attention on Fℓ with attention logits Asa
596
+ and cross-attention between Fℓ and Fg with attention log-
597
+ its Aca. We combine Asa and Aca via addition, so that en-
598
+ coded information of all point pairs from a local domain can
599
+ be enriched by the global context of the whole shape. Illus-
600
+ tration is shown in the supplementary material. The whole
601
+ process is formulated as follows:
602
+ U = φ (Foa) + Fℓ,
603
+ Foa = Fℓ − ∥softmax(Asa + Aca)∥1(vsa + vca).
604
+ (8)
605
+ Hence, intra-frame point relations can be compensated by
606
+ inter-frame information communication in a local-to-global
607
+ manner, which enriches the geometric representations.
608
+ 3.3
609
+ Deep Feature Registration
610
+ Correspondence mapping (Wang and Solomon 2019a; Pan,
611
+ Cai, and Liu 2022) plays an important role in PCR, and we
612
+ discuss that it is also critical for achieving RI in our design.
613
+ Specifically, although Fℓ and Fg are both rotation-invariant
614
+ by theory, different point sampling methods and the sign
615
+ ambiguity will cause the final feature not strictly rotation-
616
+ invariant. To solve this issue, we first examine the correspon-
617
+ dence map:
618
+ m (X, Y) =
619
+ exp
620
+
621
+ Φ1(Y)Φ2(X)⊤/t
622
+
623
+ �N
624
+ j=1 exp (Φ1(Y)Φ2(xj)⊤/t)
625
+ ,
626
+ (9)
627
+ where Φ1 and Φ2 are MLPs that project latent embeddings
628
+ X and Y to a shared space, and t controls the variation sen-
629
+ sitivity. It can be seen from Eq. 9 that the mapping function
630
+ m reveals feature similarities in the latent space, and it is
631
+ also an essential part for 3D point-level contrastive learning
632
+ in PointContrast (Xie et al. 2020) for the design of InfoNCE
633
+ losses (Van den Oord, Li, and Vinyals 2018), which have
634
+ been proven to be equivalent to maximize the mutual infor-
635
+ mation. Based on this observation, we propose a registration
636
+ loss function Lr = Lℓ
637
+ r + Lg
638
+ r, where Lℓ
639
+ r and Lg
640
+ r represent the
641
+ registration loss of (Fℓ,U) and (Fg,U). Mathematically, Lℓ
642
+ r
643
+ is defined as follows:
644
+ Lℓ
645
+ r = −
646
+
647
+ (i,j)∈M
648
+ log
649
+ exp
650
+
651
+ Φ1(Uj)Φ2(f ℓ
652
+ i )⊤/t
653
+
654
+
655
+ (·,k)∈M exp
656
+
657
+ Φ1(Uk)Φ2(f ℓ
658
+ i )⊤/t
659
+ �. (10)
660
+ The same rule is followed to compute Lg
661
+ r. Although we fol-
662
+ low the core idea of PointContrast, we differ from it in that
663
+ PointContrast defines positive samples based on feature cor-
664
+ respondences computed at the same layer level, while our
665
+ positive samples are defined across layers.
666
+ The intuition for the loss design is that the 3D shape is
667
+ forced to learn about its local region as it has to distinguish
668
+ it from other parts of different objects. Moreover, we would
669
+ like to maximize the mutual information between different
670
+ poses of the 3D shape, as features encoded from different
671
+ poses should represent the same object, which is very use-
672
+ ful in achieving RI in SO(3). Moreover, the mutual infor-
673
+ mation between Fℓ and Fg is implicitly maximized, such
674
+ Rotation Sensitive
675
+ z/z
676
+ z/SO(3)
677
+ SO(3)/SO(3)
678
+
679
+ PointNet (Qi et al. 2017a)
680
+ 89.2
681
+ 16.2
682
+ 75.5
683
+ 59.3
684
+ PoinNet++ (Qi et al. 2017b)
685
+ 89.3
686
+ 28.6
687
+ 85.0
688
+ 56.4
689
+ PCT (Guo et al. 2021)
690
+ 90.3
691
+ 37.2
692
+ 88.5
693
+ 51.3
694
+ Rotation Robust
695
+ z/z
696
+ z/SO(3)
697
+ SO(3)/SO(3)
698
+
699
+ Spherical CNN* (Esteves et al. 2018)
700
+ 88.9
701
+ 76.9
702
+ 86.9
703
+ 10
704
+ SFCNN (Rao, Lu, and Zhou 2019)
705
+ 91.4
706
+ 84.8
707
+ 90.1
708
+ 5.3
709
+ RIConv (Zhang et al. 2019)
710
+ 86.5
711
+ 86.4
712
+ 86.4
713
+ 0.1
714
+ ClusterNet (Chen et al. 2019)
715
+ 87.1
716
+ 87.1
717
+ 87.1
718
+ 0.0
719
+ PR-InvNet (Yu et al. 2020)
720
+ 89.2
721
+ 89.2
722
+ 89.2
723
+ 0.0
724
+ RI-GCN (Kim, Park, and Han 2020)
725
+ 89.5
726
+ 89.5
727
+ 89.5
728
+ 0.0
729
+ GCAConv (Zhang et al. 2020)
730
+ 89.0
731
+ 89.1
732
+ 89.2
733
+ 0.1
734
+ RI-Framework (Li et al. 2021b)
735
+ 89.4
736
+ 89.4
737
+ 89.3
738
+ 0.1
739
+ VN-DGCNN (Deng et al. 2021)
740
+ 89.5
741
+ 89.5
742
+ 90.2
743
+ 0.7
744
+ SGMNet (Xu et al. 2021)
745
+ 90.0
746
+ 90.0
747
+ 90.0
748
+ 0.0
749
+ Li et al. (2021a)
750
+ 90.2
751
+ 90.2
752
+ 90.2
753
+ 0.0
754
+ OrientedMP (Luo et al. 2022)
755
+ 88.4
756
+ 88.4
757
+ 88.9
758
+ 0.5
759
+ ELGANet (Gu et al. 2022)
760
+ 90.3
761
+ 90.3
762
+ 90.3
763
+ 0.0
764
+ Ours
765
+ 91.0
766
+ 91.0
767
+ 91.0
768
+ 0.0
769
+ Table 1: Classification results on ModelNet40 under rota-
770
+ tions. * denotes the input type as projected voxels of 2×642,
771
+ while the rest take raw points of 1024×3 as inputs. ∆ is the
772
+ absolute difference between z/SO(3) and SO(3)/SO(3).
773
+ that shared semantic information about geometric structures
774
+ can be learned, leading to a more geometrically accurate and
775
+ discriminative representation. More details about Lℓ
776
+ r can be
777
+ found in the supplementary material.
778
+ 4
779
+ Experiments
780
+ We evaluate our model on 3D shape classification, part seg-
781
+ mentation, and retrieval tasks under rotations, and exten-
782
+ sive experiments are conducted to analyze the network de-
783
+ sign. Detailed model architectures for the three tasks are
784
+ shown in the supplementary material. Our evaluating proto-
785
+ cols are the same as (Esteves et al. 2018): training and testing
786
+ the network under azimuthal rotations (z/z); training under
787
+ azimuthal rotations while testing under arbitrary rotations
788
+ (z/SO(3)); and training and testing under arbitrary rotations
789
+ (SO(3)/SO(3)).
790
+ 4.1
791
+ 3D Object Classification
792
+ Synthetic Dataset.
793
+ We first examine the model perfor-
794
+ mance on the synthetic ModelNet40 (Wu et al. 2015)
795
+ dataset. We sample 1024 points from each data with only
796
+ xyz coordinates as input features. Hyper-parameters for
797
+ training follow the same as (Guo et al. 2021), except
798
+ that points are downsampled in the order of (1024, 512,
799
+ 128) with feature dimensions of (3, 128, 256). We report
800
+ and compare our model performance with state-of-the-art
801
+ (SoTA) methods in Table 1. Both rotation sensitive and ro-
802
+ bust methods achieve great performance under z/z. How-
803
+ ever, the former could not generalize well to unseen rota-
804
+ tions. Rotation robust methods like Spherical CNN (Esteves
805
+ et al. 2018) and SFCNN (Rao, Lu, and Zhou 2019) achieve
806
+ competitive results under z/z, but their performance is not
807
+ consistent on z/SO(3) and SO(3)/SO(3) due to the imperfect
808
+ projection from points to voxels when using spherical so-
809
+ lutions. We outperform the recent proposed methods (Luo
810
+ et al. 2022; Xu et al. 2021; Deng et al. 2021) and achieve an
811
+ accuracy of 91.0%, proving the superiority of our framework
812
+ on classification.
813
+
814
+ Method
815
+ z/SO(3)
816
+ SO(3)/SO(3)
817
+
818
+ PointNet (Qi et al. 2017a)
819
+ 16.7
820
+ 54.7
821
+ 38.0
822
+ PointNet++ (Qi et al. 2017b)
823
+ 15.0
824
+ 47.4
825
+ 32.4
826
+ PCT (Guo et al. 2021)
827
+ 28.5
828
+ 45.8
829
+ 17.3
830
+ RIConv (Zhang et al. 2019)
831
+ 78.4
832
+ 78.1
833
+ 0.3
834
+ RI-GCN (Kim, Park, and Han 2020)
835
+ 80.5
836
+ 80.6
837
+ 0.1
838
+ GCAConv (Zhang et al. 2020)
839
+ 80.1
840
+ 80.3
841
+ 0.2
842
+ RI-Framework (Li et al. 2021b)
843
+ 79.8
844
+ 79.9
845
+ 0.1
846
+ LGR-Net (Zhao et al. 2022)
847
+ 81.2
848
+ 81.4
849
+ 0.2
850
+ VN-DGCNN (Deng et al. 2021)
851
+ 79.8
852
+ 80.3
853
+ 0.5
854
+ OrientedMP (Luo et al. 2022)
855
+ 76.7
856
+ 77.2
857
+ 0.5
858
+ Ours
859
+ 86.6
860
+ 86.3
861
+ 0.3
862
+ Table 2: Classification results on ScanObjectNN OBJ BG
863
+ under z/SO(3) and SO(3)/SO(3).
864
+ GT
865
+ Ours
866
+ RI-GCN
867
+ RIConv
868
+ VN-DGCNN
869
+ Figure 3: Segmentation comparisons on ShapeNetPart,
870
+ where ground truth (GT) samples are shown for refer-
871
+ ence. Red dotted circles indicate obvious failures on certain
872
+ classes, and purple circles denote the slight difference be-
873
+ tween our design and VN-DGCNN.
874
+ Real Dataset.
875
+ Experiments are also conducted on a real-
876
+ scanned dataset. ScanObjectNN (Uy et al. 2019) is a com-
877
+ monly used benchmark to explore the robustness to noisy
878
+ and deformed 3D objects with non-uniform surface density,
879
+ which includes 2,902 incomplete point clouds in 15 classes.
880
+ We use OBJ BG subset with the background noise and sam-
881
+ ple 1,024 points under z/SO(3) and SO(3)/SO(3). Table 2
882
+ shows that our model achieves the highest results with ex-
883
+ cellent consistency with random rotations.
884
+ 4.2
885
+ 3D Part Segmentation
886
+ Shape part segmentation is a more challenging task than ob-
887
+ ject classification. We use ShapeNetPart (Yi et al. 2016) for
888
+ evaluation, where we sample 2048 points with xyz coordi-
889
+ nates as model inputs. The training strategy is the same as
890
+ the classification task except that the training epoch num-
891
+ ber is 300. Part-averaged IoU (mIoU) is reported in Table
892
+ 3, and detailed per-class mIoU values are shown in the sup-
893
+ plementary material. Representative methods such as Point-
894
+ Method
895
+ z/SO(3)
896
+ SO(3)/SO(3)
897
+
898
+ PointNet (Qi et al. 2017a)
899
+ 38.0
900
+ 62.3
901
+ 24.3
902
+ PointNet++ (Qi et al. 2017b)
903
+ 48.3
904
+ 76.7
905
+ 28.4
906
+ PCT (Guo et al. 2021)
907
+ 38.5
908
+ 75.2
909
+ 36.7
910
+ RIConv (Zhang et al. 2019)
911
+ 75.3
912
+ 75.5
913
+ 0.2
914
+ RI-GCN (Kim, Park, and Han 2020)
915
+ 77.2
916
+ 77.3
917
+ 0.1
918
+ RI-Framework (Li et al. 2021b)
919
+ 79.2
920
+ 79.4
921
+ 0.2
922
+ LGR-Net (Zhao et al. 2022)
923
+ 80.0
924
+ 80.1
925
+ 0.1
926
+ VN-DGCNN (Deng et al. 2021)
927
+ 81.4
928
+ 81.4
929
+ 0.0
930
+ OrientedMP (Luo et al. 2022)
931
+ 80.1
932
+ 80.9
933
+ 0.8
934
+ Ours
935
+ 80.3
936
+ 80.4
937
+ 0.1
938
+ Table 3: Segmentation results on ShapeNetPart under
939
+ z/SO(3) and SO(3)/SO(3), where the second best results are
940
+ underlined.
941
+ Method
942
+ micro mAP
943
+ macro mAP
944
+ Score
945
+ Spherical CNN (Esteves et al. 2018)
946
+ 0.685
947
+ 0.444
948
+ 0.565
949
+ SFCNN (Rao, Lu, and Zhou 2019)
950
+ 0.705
951
+ 0.483
952
+ 0.594
953
+ GCAConv (Zhang et al. 2020)
954
+ 0.708
955
+ 0.490
956
+ 0.599
957
+ RI-Framework (Li et al. 2021b)
958
+ 0.707
959
+ 0.510
960
+ 0.609
961
+ Ours
962
+ 0.715
963
+ 0.510
964
+ 0.613
965
+ Table 4: Comparisons of SoTA methods on the 3D shape
966
+ retrieval task.
967
+ Net++ and PCT are vulnerable to rotations. Rotation robust
968
+ methods present competitive results under z/SO(3), where
969
+ we achieve the second best result of 80.3%. We give more
970
+ details of comparison between VN-DGCNN (Deng et al.
971
+ 2021) and our work in the supplementary material, where
972
+ our method performs better than VN-DGCNN for several
973
+ classes. Moreover, qualitative results shown in Fig. 3 present
974
+ that we can achieve visually better results than VN-DGCNN
975
+ in certain classes such as the airplane and car. More qualita-
976
+ tive results are shown in the supplementary material.
977
+ 4.3
978
+ 3D Shape Retrieval
979
+ We further conduct 3D shape retrieval experiments on
980
+ ShapeNetCore55 (Chang et al. 2015), which contains two
981
+ categories of datasets: normal and perturbed. We only use
982
+ the perturbed part to validate our model performance under
983
+ rotations. We combine the training and validation sets and
984
+ validate our method on the testing set following the training
985
+ policy of (Esteves et al. 2018). Experimental results are re-
986
+ ported in Table 4, where the final score is the average value
987
+ of micro and macro mean average of precision (mAP) as
988
+ in (Savva et al. 2017). Similar to the classification task, our
989
+ method achieves SoTA performance.
990
+ 4.4
991
+ Ablation Study
992
+ Effectiveness of Transformer Designs.
993
+ We examine the
994
+ effectiveness of our transformer design by conducting clas-
995
+ sification experiments under z/SO(3). We first ablate one or
996
+ both of the angular embeddings and report the results in Ta-
997
+ ble 5 (models A, B, and C). Model B performs better than
998
+ model C by 0.4%, which validates our design of feature in-
999
+ tegration where Mℓ
1000
+ i is used as the main source of informa-
1001
+ tion. When both angular embeddings are applied, the best
1002
+ result is achieved (i.e., 91.0%). Moreover, we validate our
1003
+ discussion in Section 3.2 by comparing models D and E. We
1004
+
1005
+ .:Model
1006
+
1007
+ sa
1008
+
1009
+ ca
1010
+ Fg∗
1011
+ Asa + Aca
1012
+ Lℓ
1013
+ r
1014
+ Lg
1015
+ r
1016
+ Acc.
1017
+ A
1018
+
1019
+
1020
+
1021
+ 90.0
1022
+ B
1023
+
1024
+
1025
+
1026
+
1027
+ 90.6
1028
+ C
1029
+
1030
+
1031
+
1032
+
1033
+ 90.2
1034
+ D
1035
+
1036
+
1037
+
1038
+
1039
+
1040
+
1041
+ 90.2
1042
+ E
1043
+
1044
+
1045
+
1046
+
1047
+ 90.4
1048
+ F
1049
+
1050
+
1051
+
1052
+ 90.0
1053
+ G
1054
+
1055
+
1056
+
1057
+
1058
+ 90.2
1059
+ H
1060
+
1061
+
1062
+
1063
+
1064
+ 90.6
1065
+ Ours
1066
+
1067
+
1068
+
1069
+
1070
+
1071
+ 91.0
1072
+ Table 5: Module analysis of AIT and loss functions. Fg∗
1073
+ means encoding Fg via Intra-frame Aligned Self-attention.
1074
+ demonstrate in model D that when encoding Fg in the same
1075
+ way as Fℓ, the model performance decreases, which indi-
1076
+ cates that encoding Fg via self-attention will increase the
1077
+ model overfitting. More analyses can be found in the supple-
1078
+ mentary material. Finally, we examine the effectiveness of
1079
+ our attention logits-based integration scheme by comparing
1080
+ our model with the conventional method (model E), which
1081
+ applies self- and cross-attention sequentially and repeatedly.
1082
+ We observe that our result is better than model E by 0.6%,
1083
+ indicating that our design is more effective.
1084
+ Registration Loss.
1085
+ We sequentially ablate Lg
1086
+ r and Lℓ
1087
+ r
1088
+ (models F, G, and H) to check the effectiveness of our reg-
1089
+ istration loss deign. Results in Table 5 demonstrate that we
1090
+ can still achieve a satisfactory result of 90.0% without fea-
1091
+ ture registration. Individual application of Lg
1092
+ r and Lℓ
1093
+ r shows
1094
+ the improvement when forcing the final representation to be
1095
+ close to rotation-invariant features. Moreover, it can be seen
1096
+ that model H performs better than model G, which indicates
1097
+ that intermediate features learned from the global scale are
1098
+ important for shape classification. The best model perfor-
1099
+ mance is hence achieved by applying both losses.
1100
+ Noise Robustness.
1101
+ In real-world applications, raw point
1102
+ clouds contain noisy signals. We conduct experiments to
1103
+ present the model robustness to noise under z/SO(3). Two
1104
+ experiments are conducted: (1) We sample and add Gaus-
1105
+ sian noise of zero mean and varying standard deviations
1106
+ N(0, σ2) to the input data; (2) We add outliers sampled
1107
+ from a unit sphere to each object. As shown in Fig. 4 (left),
1108
+ we achieve on par results to RI-Framework when std is low,
1109
+ while we perform better while std increases, indicating that
1110
+ our model is robust against high levels of noise. Besides,
1111
+ as the number of noisy points increases, most methods are
1112
+ heavily affected while we can still achieve good results.
1113
+ Visualization of Rotation Invariance.
1114
+ We further ex-
1115
+ amine RI of learned features. Specifically, we use Grad-
1116
+ CAM (Selvaraju et al. 2017) to check how the model pays
1117
+ attention to different parts of data samples under different
1118
+ rotations. Results are reported in Fig. 5 with correspondence
1119
+ between gradients and colors shown on the right. RI-GCN
1120
+ presents a good result, but its behavior is not consistent over
1121
+ some classes (e.g., vase and plant) and it does not pay atten-
1122
+ tion to regions that are critical for classification (see toilet),
1123
+ showing inferior performance to ours. PointNet++ shows no
1124
+ 0.00
1125
+ 0.01
1126
+ 0.02
1127
+ 0.03
1128
+ 0.04
1129
+ 0.05
1130
+ std of noise
1131
+ 20
1132
+ 30
1133
+ 40
1134
+ 50
1135
+ 60
1136
+ 70
1137
+ 80
1138
+ 90
1139
+ Accuracy (%)
1140
+ RIConv
1141
+ RI-GCN
1142
+ RI-framework
1143
+ SRINet
1144
+ ours
1145
+ 0
1146
+ 10
1147
+ 20
1148
+ 30
1149
+ 40
1150
+ 50
1151
+ # of noisy points
1152
+ 30
1153
+ 40
1154
+ 50
1155
+ 60
1156
+ 70
1157
+ 80
1158
+ 90
1159
+ Figure 4: Left: Results on Gaussian noise of zero mean and
1160
+ variant standard deviation values. Right: Results on differ-
1161
+ ent numbers of noisy points.
1162
+ high
1163
+ low
1164
+ high
1165
+ low
1166
+ airplane
1167
+ guitar
1168
+ vase
1169
+ plant
1170
+ toilet
1171
+ PointNet++
1172
+ RI-GCN
1173
+ high
1174
+ low
1175
+ Ours
1176
+ Figure 5: Network attention on PointNet++ (top), RI-GCN
1177
+ (mid) and our model (bot).
1178
+ resistance to rotations, while our method exhibits a consis-
1179
+ tent gradient distribution over different parts with random
1180
+ rotations, indicating our network is not affected by rotations.
1181
+ 5
1182
+ Conclusion
1183
+ In this work, we rethink and investigate the close relation be-
1184
+ tween rotation invariance and point cloud registration, based
1185
+ on which we propose a PCR-cored learning framework with
1186
+ three stages. With a pair of rotation-invariant shape descrip-
1187
+ tors constructed from local and global scales, a comprehen-
1188
+ sive learning and feature integration module is proposed,
1189
+ Aligned Integration Transformer, to simultaneously effec-
1190
+ tively align and integrate shape codes via self- and cross-
1191
+ attentions. To further preserve rotation invariance in the fi-
1192
+ nal feature representation, a registration loss is proposed to
1193
+ align it with intermediate features, where shared semantic
1194
+ knowledge of geometric parts is also extracted. Extensive
1195
+ experiments demonstrated the superiority and robustness of
1196
+ our designs. In future work, we will examine efficient meth-
1197
+ ods for invariance learning on large-scale point clouds.
1198
+
1199
+ ..:References
1200
+ Afham, M.; Dissanayake, I.; Dissanayake, D.; Dharmasiri,
1201
+ A.; Thilakarathna, K.; and Rodrigo, R. 2022. Crosspoint:
1202
+ Self-supervised cross-modal contrastive learning for 3D
1203
+ point cloud understanding. In CVPR.
1204
+ Besl, P. J.; and McKay, N. D. 1992. Method for registration
1205
+ of 3-D shapes. In Sensor fusion.
1206
+ Bro, R.; Acar, E.; and Kolda, T. G. 2008. Resolving the sign
1207
+ ambiguity in the singular value decomposition. In JoC.
1208
+ Chang, A. X.; Funkhouser, T.; Guibas, L.; Hanrahan, P.;
1209
+ Huang, Q.; Li, Z.; Savarese, S.; Savva, M.; Song, S.; Su,
1210
+ H.; et al. 2015. Shapenet: An information-rich 3D model
1211
+ repository. In arXiv preprint.
1212
+ Chen, C.; Li, G.; Xu, R.; Chen, T.; Wang, M.; and Lin, L.
1213
+ 2019.
1214
+ Clusternet: Deep hierarchical cluster network with
1215
+ rigorously rotation-invariant representation for point cloud
1216
+ analysis. In CVPR.
1217
+ Chen, C.-F. R.; Fan, Q.; and Panda, R. 2021.
1218
+ Crossvit:
1219
+ Cross-attention multi-scale vision transformer for image
1220
+ classification. In ICCV.
1221
+ Cheng, R.; Razani, R.; Taghavi, E.; Li, E.; and Liu, B. 2021.
1222
+ (AF)2-S3Net: Attentive feature fusion with adaptive fea-
1223
+ ture selection for sparse semantic segmentation network. In
1224
+ CVPR.
1225
+ Cohen, T. S.; Geiger, M.; K¨ohler, J.; and Welling, M. 2018.
1226
+ Spherical CNNs. In ICLR.
1227
+ Deng, C.; Litany, O.; Duan, Y.; Poulenard, A.; Tagliasac-
1228
+ chi, A.; and Guibas, L. J. 2021. Vector neurons: A general
1229
+ framework for SO (3)-equivariant networks. In ICCV.
1230
+ Deng, H.; Birdal, T.; and Ilic, S. 2018. PPFNet: Global con-
1231
+ text aware local features for robust 3d point matching. In
1232
+ CVPR.
1233
+ Dosovitskiy, A.; Beyer, L.; Kolesnikov, A.; Weissenborn,
1234
+ D.; Zhai, X.; Unterthiner, T.; Dehghani, M.; Minderer, M.;
1235
+ Heigold, G.; Gelly, S.; Uszkoreit, J.; and Houlsby, N. 2021.
1236
+ An Image is Worth 16x16 Words: Transformers for Image
1237
+ Recognition at Scale. In ICLR.
1238
+ Drost, B.; Ulrich, M.; Navab, N.; and Ilic, S. 2010. Model
1239
+ globally, match locally: Efficient and robust 3D object
1240
+ recognition. In CVPR.
1241
+ Esteves, C.; Allen-Blanchette, C.; Makadia, A.; and Dani-
1242
+ ilidis, K. 2018. Learning SO(3) equivariant representations
1243
+ with spherical CNNs. In ECCV.
1244
+ Gu, R.; Wu, Q.; Li, Y.; Kang, W.; Ng, W.; and Wang, Z.
1245
+ 2022.
1246
+ Enhanced local and global learning for rotation-
1247
+ invariant point cloud representation. In MultiMedia.
1248
+ Guo, M.-H.; Cai, J.-X.; Liu, Z.-N.; Mu, T.-J.; Martin, R. R.;
1249
+ and Hu, S.-M. 2021. PCT: Point cloud transformer. In CVM.
1250
+ He, K.; Fan, H.; Wu, Y.; Xie, S.; and Girshick, R. 2020.
1251
+ Momentum contrast for unsupervised visual representation
1252
+ learning. In CVPR.
1253
+ Jaderberg, M.; Simonyan, K.; Zisserman, A.; et al. 2015.
1254
+ Spatial transformer networks. In NeurIPS.
1255
+ Jing, B.; Eismann, S.; Suriana, P.; Townshend, R. J.; and
1256
+ Dror, R. 2020. Learning from protein structure with geo-
1257
+ metric vector perceptrons. In ICLR.
1258
+ Kim, S.; Park, J.; and Han, B. 2020.
1259
+ Rotation-Invariant
1260
+ Local-to-Global Representation Learning for 3D Point
1261
+ Cloud. In NeurIPS.
1262
+ Li, F.; Fujiwara, K.; Okura, F.; and Matsushita, Y. 2021a. A
1263
+ Closer Look at Rotation-Invariant Deep Point Cloud Analy-
1264
+ sis. In ICCV.
1265
+ Li, X.; Li, R.; Chen, G.; Fu, C.-W.; Cohen-Or, D.; and Heng,
1266
+ P.-A. 2021b. A rotation-invariant framework for deep point
1267
+ cloud analysis. In TVCG.
1268
+ Liu, Z.; Lin, Y.; Cao, Y.; Hu, H.; Wei, Y.; Zhang, Z.; Lin,
1269
+ S.; and Guo, B. 2021. Swin transformer: Hierarchical vision
1270
+ transformer using shifted windows. In ICCV.
1271
+ Luo, S.; Li, J.; Guan, J.; Su, Y.; Cheng, C.; Peng, J.; and
1272
+ Ma, J. 2022. Equivariant Point Cloud Analysis via Learning
1273
+ Orientations for Message Passing. In CVPR.
1274
+ Pan, L.; Cai, Z.; and Liu, Z. 2022. Robust Partial-to-Partial
1275
+ Point Cloud Registration in a Full Range. In IJCV.
1276
+ Qi, C. R.; Su, H.; Mo, K.; and Guibas, L. J. 2017a. Point-
1277
+ Net: Deep learning on point sets for 3D classification and
1278
+ segmentation. In CVPR.
1279
+ Qi, C. R.; Yi, L.; Su, H.; and Guibas, L. J. 2017b. Point-
1280
+ Net++: Deep hierarchical feature learning on point sets in a
1281
+ metric space. In NeurIPS.
1282
+ Qin, Z.; Yu, H.; Wang, C.; Guo, Y.; Peng, Y.; and Xu, K.
1283
+ 2022.
1284
+ Geometric Transformer for Fast and Robust Point
1285
+ Cloud Registration. In CVPR.
1286
+ Rao, Y.; Lu, J.; and Zhou, J. 2019. Spherical fractal con-
1287
+ volutional neural networks for point cloud recognition. In
1288
+ CVPR.
1289
+ Sanghi, A. 2020. Info3d: Representation learning on 3D ob-
1290
+ jects using mutual information maximization and contrastive
1291
+ learning. In ECCV.
1292
+ Savva, M.; Yu, F.; Su, H.; Kanezaki, A.; Furuya, T.;
1293
+ Ohbuchi, R.; Zhou, Z.; Yu, R.; Bai, S.; Bai, X.; et al. 2017.
1294
+ Large-scale 3D shape retrieval from ShapeNet Core55:
1295
+ SHREC’17 track. In workshop of 3DOR.
1296
+ Selvaraju, R. R.; Cogswell, M.; Das, A.; Vedantam, R.;
1297
+ Parikh, D.; and Batra, D. 2017. Grad-cam: Visual expla-
1298
+ nations from deep networks via gradient-based localization.
1299
+ In ICCV.
1300
+ Spezialetti, R.; Stella, F.; Marcon, M.; Silva, L.; Salti, S.;
1301
+ and Di Stefano, L. 2020. Learning to orient surfaces by self-
1302
+ supervised spherical cnns. In NeurIPS.
1303
+ Sun, W.; Tagliasacchi, A.; Deng, B.; Sabour, S.; Yazdani,
1304
+ S.; Hinton, G. E.; and Yi, K. M. 2021. Canonical Capsules:
1305
+ Self-Supervised Capsules in Canonical Pose. In NeurIPS.
1306
+ Thomas, H. 2020.
1307
+ Rotation-Invariant Point Convolution
1308
+ With Multiple Equivariant Alignments. In 3DV.
1309
+ Tombari, F.; Salti, S.; and Stefano, L. D. 2010. Unique signa-
1310
+ tures of histograms for local surface description. In ECCV.
1311
+ Uy, M. A.; Pham, Q.-H.; Hua, B.-S.; Nguyen, D. T.; and
1312
+ Yeung, S.-K. 2019. Revisiting Point Cloud Classification: A
1313
+ New Benchmark Dataset and Classification Model on Real-
1314
+ World Data. In ICCV.
1315
+
1316
+ Van den Oord, A.; Li, Y.; and Vinyals, O. 2018. Represen-
1317
+ tation learning with contrastive predictive coding. In arXiv
1318
+ e-prints.
1319
+ Vaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones,
1320
+ L.; Gomez, A. N.; Kaiser, Ł.; and Polosukhin, I. 2017. At-
1321
+ tention is all you need. In NeurIPS.
1322
+ Wang, Y.; and Solomon, J. M. 2019a. Deep closest point:
1323
+ Learning representations for point cloud registration.
1324
+ In
1325
+ ICCV.
1326
+ Wang, Y.; and Solomon, J. M. 2019b. Prnet: Self-supervised
1327
+ learning for partial-to-partial registration. In NeurIPS.
1328
+ Wu, Z.; Song, S.; Khosla, A.; Yu, F.; Zhang, L.; Tang, X.;
1329
+ and Xiao, J. 2015. 3D shapenets: A deep representation for
1330
+ volumetric shapes. In CVPR.
1331
+ Xie, S.; Gu, J.; Guo, D.; Qi, C. R.; Guibas, L.; and Litany, O.
1332
+ 2020. Pointcontrast: Unsupervised pre-training for 3d point
1333
+ cloud understanding. In ECCV.
1334
+ Xu, J.; Tang, X.; Zhu, Y.; Sun, J.; and Pu, S. 2021. SGM-
1335
+ Net: Learning Rotation-Invariant Point Cloud Representa-
1336
+ tions via Sorted Gram Matrix. In ICCV.
1337
+ Yew, Z. J.; and Lee, G. H. 2020. Rpm-net: Robust point
1338
+ matching using learned features. In CVPR.
1339
+ Yi, L.; Kim, V. G.; Ceylan, D.; Shen, I.-C.; Yan, M.; Su,
1340
+ H.; Lu, C.; Huang, Q.; Sheffer, A.; and Guibas, L. 2016. A
1341
+ scalable active framework for region annotation in 3D shape
1342
+ collections. In ACM ToG.
1343
+ Yu, H.; Li, F.; Saleh, M.; Busam, B.; and Ilic, S. 2021a.
1344
+ CoFiNet: Reliable Coarse-to-fine Correspondences for Ro-
1345
+ bust PointCloud Registration. In NeurIPS.
1346
+ Yu, J.; Zhang, C.; Wang, H.; Zhang, D.; Song, Y.; Xiang, T.;
1347
+ Liu, D.; and Cai, W. 2021b. 3D Medical Point Transformer:
1348
+ Introducing Convolution to Attention Networks for Medical
1349
+ Point Cloud Analysis. In arXiv preprint.
1350
+ Yu, R.; Wei, X.; Tombari, F.; and Sun, J. 2020.
1351
+ Deep
1352
+ Positional and Relational Feature Learning for Rotation-
1353
+ Invariant Point Cloud Analysis. In ECCV.
1354
+ Yuan, W.; Eckart, B.; Kim, K.; Jampani, V.; Fox, D.; and
1355
+ Kautz, J. 2020. Deepgmr: Learning latent gaussian mixture
1356
+ models for registration. In ECCV.
1357
+ Zhang, C.; Yu, J.; Song, Y.; and Cai, W. 2021. Exploiting
1358
+ Edge-Oriented Reasoning for 3D Point-Based Scene Graph
1359
+ Analysis. In CVPR.
1360
+ Zhang, Z.; Hua, B.-S.; Chen, W.; Tian, Y.; and Yeung, S.-K.
1361
+ 2020. Global context aware convolutions for 3D point cloud
1362
+ understanding. In 3DV.
1363
+ Zhang, Z.; Hua, B.-S.; Rosen, D. W.; and Yeung, S.-K. 2019.
1364
+ Rotation invariant convolutions for 3D point clouds deep
1365
+ learning. In 3DV.
1366
+ Zhao, C.; Yang, J.; Xiong, X.; Zhu, A.; Cao, Z.; and Li, X.
1367
+ 2022. Rotation invariant point cloud analysis: Where local
1368
+ geometry meets global topology. In Pattern Recognition.
1369
+ Zhao, H.; Jiang, L.; Jia, J.; Torr, P. H.; and Koltun, V. 2021.
1370
+ Point transformer. In ICCV.
1371
+
1372
+ Rethinking Rotation Invariance with Point Cloud Registration
1373
+ **Supplementary Material**
1374
+ Jianhui Yu, Chaoyi Zhang, Weidong Cai
1375
+ School of Computer Science, University of Sydney, Australia
1376
+ {jianhui.yu, chaoyi.zhang, tom.cai}@sydney.edu.au
1377
+ Mathematical Proofs
1378
+ Proof of Rotation Equivariance of Orthonormal Basis
1379
+ Mℓ
1380
+ i of LRFs.
1381
+ We prove the rotation equivariance of Mℓ
1382
+ i
1383
+ designed for LRFs as mentioned in Section 3.1 of the main
1384
+ work. Given a random rotation matrix R ∈ R3×3, it is easy
1385
+ to derive that #»
1386
+ xiℓ is equivariant to rotations given the rotated
1387
+ version #»
1388
+ xiℓ
1389
+ ,rot:
1390
+
1391
+ xi
1392
+
1393
+ ,rot =
1394
+ Rpi − Rpm
1395
+ ∥Rpi − Rpm∥2
1396
+ =
1397
+ R(pi − pm)
1398
+
1399
+ (R(pi − pm))⊤ R(pi − pm)
1400
+ =
1401
+ R# »
1402
+ pmpi
1403
+ �# »
1404
+ pmpi⊤R⊤R# »
1405
+ pmpi
1406
+ = R
1407
+ # »
1408
+ pmpi
1409
+ ∥# »
1410
+ pmpi∥2
1411
+ = R#»
1412
+ xi
1413
+ ℓ,
1414
+ (1)
1415
+ where the subscript rot represents the axis after rotations.
1416
+ Moreover, Σℓ
1417
+ i from Eq. (1) of the main work after rotations
1418
+ can be represented as follows:
1419
+ Σℓ
1420
+ i,rot =
1421
+ |N (pi)|
1422
+
1423
+ j=1
1424
+ αjR# »
1425
+ pipj # »
1426
+ pipj
1427
+ ⊤R⊤
1428
+ = R
1429
+
1430
+
1431
+ |N (pi)|
1432
+
1433
+ j=1
1434
+ αj # »
1435
+ pipj # »
1436
+ pipj
1437
+
1438
+
1439
+ � R⊤ = RΣℓ
1440
+ iR⊤.
1441
+ (2)
1442
+ As mentioned in the main work, eigenvalue decomposition
1443
+ can be directly applied to Σℓ
1444
+ i, resulting in the following ex-
1445
+ pressions:
1446
+ RΣℓ
1447
+ iR⊤ = RUℓ
1448
+ iΛℓ
1449
+ iUℓ
1450
+ i
1451
+ ⊤R⊤ =
1452
+
1453
+ RUℓ
1454
+ i
1455
+
1456
+ Λℓ
1457
+ i
1458
+
1459
+ RUℓ
1460
+ i
1461
+ �⊤ . (3)
1462
+ Since #»
1463
+ ziℓ is defined to have the same direction as the eigen-
1464
+ vector with the smallest eigenvalue, after rotation, #»
1465
+ ziℓ
1466
+ ,rot =
1467
+ R#»
1468
+ ziℓ. Thus, the rotated y-axis is:
1469
+
1470
+ yi
1471
+
1472
+ ,rot = #»
1473
+ zi
1474
+
1475
+ ,rot × #»
1476
+ xi
1477
+
1478
+ ,rot = R#»
1479
+ zi
1480
+ ℓ × R#»
1481
+ xi
1482
+
1483
+ = det (R)
1484
+
1485
+ R−1�⊤ �#»
1486
+ zi
1487
+ ℓ × #»
1488
+ xi
1489
+ ℓ�
1490
+ = R
1491
+ �#»
1492
+ zi
1493
+ ℓ × #»
1494
+ xi
1495
+ ℓ�
1496
+ = R#»
1497
+ yi
1498
+ ℓ.
1499
+ (4)
1500
+ Since all basis vectors are rotation-equivariant, the local or-
1501
+ thonormal basis Mℓ
1502
+ i = [#»
1503
+ xiℓ, #»
1504
+ yiℓ, #»
1505
+ ziℓ] is rotation-equivariant.
1506
+ Copyright © 2023, Association for the Advancement of Artificial
1507
+ Intelligence (www.aaai.org). All rights reserved.
1508
+ Proof of Rotation Invariance of Local Shape Descriptors
1509
+ pℓ
1510
+ ij.
1511
+ Here, we show the rotation invariance of local shape
1512
+ descriptors pℓ
1513
+ ij introduced in Section 3.1 of the main work.
1514
+ Based on the proof shown above, it is easy to show the rota-
1515
+ tion invariance of pℓ
1516
+ ij after rotation R:
1517
+ pℓ
1518
+ ij,rot = # »
1519
+ pipj
1520
+
1521
+ ,rotMℓ
1522
+ i,rot = (R# »
1523
+ pipj)⊤ �
1524
+ RMℓ
1525
+ i
1526
+
1527
+ = # »
1528
+ pipj
1529
+ ⊤Mℓ
1530
+ i = pℓ
1531
+ ij.
1532
+ (5)
1533
+ Model Details
1534
+ Architectures.
1535
+ The model overview for the 3D classifica-
1536
+ tion, segmentation, and retrieval tasks is shown in Fig. 1,
1537
+ where details of each module design are explained in Sec-
1538
+ tion 3 of the main work. The architecture of attention-based
1539
+ feature integration module is illustrated in Fig. 2. Please re-
1540
+ fer to Section 3.2 of the main work for more details.
1541
+ Fg in AIT.
1542
+ As mentioned in Section 3.2, to alleviate the
1543
+ model overfitting we do not apply self-attention on Fg. We
1544
+ argue that since we use global shape information as the sup-
1545
+ plementary material to assist local shape learning, this im-
1546
+ plementation allow lower-level information to flow effec-
1547
+ tively across layers to help the learning of higher-level lo-
1548
+ cal shape features, which could reduce the model overfit-
1549
+ ting. We encode Fℓ in AIT blocks, as abstracting informa-
1550
+ tion from local structures can increase the model’s ability on
1551
+ fine-grained pattern recognition and generalizability to com-
1552
+ plex scenes (Qi et al. 2017b). Hence, we find that without
1553
+ applying learnable modules on Fg is beneficial to our model
1554
+ performance.
1555
+ Registration Loss.
1556
+ In this this part, we first explain the
1557
+ benefits of applying the registration loss to preserve the ro-
1558
+ tation invariance. We then give more details about Eq. (10)
1559
+ in the main work.
1560
+ Suppose Uℓ and Ug are local and global part information
1561
+ of the final integrated feature U. By maximizing the mutual
1562
+ information between (U, Fℓ) and (U, Fg), (Uℓ, Fℓ) and
1563
+ (Ug, Fg) are implicitly maximized. In this case, the shared
1564
+ geometric information between the local Fℓ/global Fg and
1565
+ the integrated domain U are refined, increasing the repre-
1566
+ sentation power of U. Besides, the maximized similarities of
1567
+ (Uℓ, Fℓ) and (Ug, Fg) also tend to learn rotation invariance
1568
+ in an unsupervised manner. Specifically, although Fℓ/Uℓ en-
1569
+ codes local patches with different poses (since LRFs are
1570
+ arXiv:2301.00149v1 [cs.CV] 31 Dec 2022
1571
+
1572
+ airplane
1573
+ car
1574
+ chair
1575
+
1576
+ toilet
1577
+ Rotational
1578
+ Embeddin
1579
+ g
1580
+
1581
+ Shape Retrieval
1582
+ Classification
1583
+ GRF
1584
+ LRF
1585
+ SA x 2
1586
+ SA x 2
1587
+ AIT x 4
1588
+ U
1589
+ 𝑳𝒓
1590
+ "
1591
+ 𝑳𝒓ℓ
1592
+ MLP
1593
+ max
1594
+ Fℓ
1595
+ F$
1596
+ MLP
1597
+ Rotation-Invariant
1598
+ Shape Encoding
1599
+ Aligned Feature
1600
+ Integration
1601
+ Deep Feature
1602
+ Registration
1603
+ (a) classification/retrieval
1604
+ GRF
1605
+ LRF
1606
+ SA x 2
1607
+ SA x 2
1608
+ AIT x 4
1609
+ U
1610
+ 𝑳𝒓
1611
+ "
1612
+ 𝑳𝒓ℓ
1613
+ MLP
1614
+ Fℓ
1615
+ F$
1616
+ MLP
1617
+ FP x 2
1618
+ Rotational
1619
+ Embeddin
1620
+ g
1621
+ Rotation-Invariant
1622
+ Shape Encoding
1623
+ Aligned Feature
1624
+ Integration
1625
+ Deep Feature
1626
+ Registration
1627
+ Part Segmentation
1628
+ (b) segmentation
1629
+ Figure 1: Model overviews for (a) classification / retrieval
1630
+ and (b) segmentation. GRF: global reference frame; LRF:
1631
+ local reference frame; SA: set abstraction; AIT: Aligned In-
1632
+ tegration Transformer; and FP: forward passing.
1633
+ 𝑾!"
1634
+ #
1635
+ 𝑾!"
1636
+ $
1637
+ 𝑾!"
1638
+ %
1639
+ 𝐤!": 𝑁×𝑑
1640
+ 𝐪!": 𝑁×𝑑
1641
+ 𝐯!": 𝑁×𝐶
1642
+ 𝐤#": 𝑁×𝑑
1643
+ 𝐯#": 𝑁×𝐶
1644
+ 𝑾&"
1645
+ $
1646
+ 𝑾&"
1647
+ #
1648
+ 𝑭ℓ
1649
+ 𝑭(
1650
+ 𝑨𝒔𝒆𝒍𝒇
1651
+ 𝒂𝒕𝒕𝒏: 𝑁×𝑁
1652
+ 𝑨𝒔𝒆𝒍𝒇
1653
+ 𝑨𝒄𝒓𝒐𝒔𝒔
1654
+ 𝒓𝒐𝒕
1655
+ : 𝑁×𝑁
1656
+ 𝑨𝒄𝒓𝒐𝒔𝒔
1657
+ 𝑨𝒔𝒉𝒂𝒓𝒆𝒅: 𝑁×𝑁
1658
+ 𝑨𝒔𝒆𝒍𝒇
1659
+ 𝒓𝒐𝒕 : 𝑁×𝑁 𝑨𝒄𝒓𝒐𝒔𝒔
1660
+ 𝒂𝒕𝒕𝒏 : 𝑁×𝑁
1661
+ 𝑭0"
1662
+ 𝐚𝐝𝐝𝐢𝐭𝐢𝐨𝐧
1663
+ 𝐦𝐚𝐭𝐫𝐢𝐱 𝐩𝐫𝐨𝐝𝐮𝐜𝐭
1664
+ 𝐬𝐮𝐛𝐭𝐫𝐚𝐜𝐭𝐢𝐨𝐧
1665
+ Figure 2: Illustrations of attention-based feature integra-
1666
+ tion, where blue and green boxes indicate self- and cross-
1667
+ attention. Brown, gold and purple colored components cor-
1668
+ respond to v, k and q implementations.
1669
+ unaligned), and Fg/Ug encodes the whole 3D object in a
1670
+ canonical pose, their feature similarity should be enforced
1671
+ to be similar as they represent the same 3D object, no matter
1672
+ what their poses are. Moreover, the mutual information be-
1673
+ tween a local scale of a 3D object and a global scale of the
1674
+ same object is maximized, which embeds U with more ac-
1675
+ curate geometric information to distinguish it from objects
1676
+ in different classes.
1677
+ We then explain the symbols in Eq. (10). M stands for
1678
+ the set of all B × N pairs of positive samples across mini-
1679
+ batches, i.e., M = {(0, 0), ..., (B ×N, B ×N)}, where B is
1680
+ the number of batch size and N is the number of points. The
1681
+ point feature Uk is the set of negative keys where (·, k) ∈ M
1682
+ and k ̸= j. Note that since the registration loss function is
1683
+ applied to both Fℓ and Fg, the point features of the same
1684
+ point encoded from the local and global scales are pushed
1685
+ close to each other, where the mutual information is in-
1686
+ creased such that shared geometric information can be dis-
1687
+ covered across the local and global scales.
1688
+ Training Details.
1689
+ For all three tasks, we set the batch
1690
+ size to 32 for training and 16 for testing. We use farthest
1691
+ point sampling to re-sample the points from the initial 10k
1692
+ points to 1024 points for classification and retrieval and 2048
1693
+ points for segmentation. Random point translation within
1694
+ [−0.2, 0.2] and rescaling within [0.67, 1.5] were adopted for
1695
+ augmentation. We trained the model for 250 epochs with
1696
+ tα = 15 and t = 0.017. SGD is adopted as the optimizer,
1697
+ where the learning rate was set to 1e-2 with momentum of
1698
+ 0.9 and weight decay of 1e-4. Cosine annealing was applied
1699
+ to reschedule the learning rate for each epoch. For classi-
1700
+ fication and retrieval, we used one RTX2080Ti GPU with
1701
+ PyTorch for model implementation, and we used two GPUs
1702
+ for the segmentation task. The normal vector information is
1703
+ ignored for all experiments.
1704
+ More Analysis Experiments
1705
+ Influence of Randomness.
1706
+ We report the variance and
1707
+ mean values of each model in Table 5 of the main work to
1708
+ derive a more accurate and reliable estimate of our model
1709
+ performance. We hence report the variance and mean values
1710
+ of performance of each model in Table 5 on ModelNet40
1711
+ with 5 training rounds. As shown in Table 1, we can see
1712
+ that even for our model weights with the lowest performance
1713
+ 90.6% (among our five repeated runs), it still surpasses the
1714
+ highest performance among models from A to H.
1715
+ Model
1716
+ A
1717
+ B
1718
+ C
1719
+ Acc. (%)
1720
+ 89.8±0.2
1721
+ 90.4±0.2
1722
+ 90.1±0.1
1723
+ Model
1724
+ D
1725
+ E
1726
+ F
1727
+ Acc. (%)
1728
+ 89.8±0.4
1729
+ 90.1± 0.3
1730
+ 89.6±0.4
1731
+ Model
1732
+ G
1733
+ H
1734
+ Best
1735
+ Acc. (%)
1736
+ 90.0±0.2
1737
+ 90.3±0.3
1738
+ 90.8±0.2
1739
+ Table 1: Variance and Mean values of different model per-
1740
+ formances on ModelNet40 with z/SO(3).
1741
+ Point Re-sampling and Down-sampling.
1742
+ We examine
1743
+ different point re-sampling strategies from the initial 10k
1744
+
1745
+ input points down to 1024 input points. Experimental re-
1746
+ sults of applying different sampling techniques on Model-
1747
+ Net40 are shown in Table 2, where we use random sam-
1748
+ pling (RS), farthest point sampling (FPS), uniform sampling
1749
+ (US), and inverse density importance sampling (IDIS) from
1750
+ (Groh, Wieschollek, and Lensch 2018) to examine the im-
1751
+ pact of different sampling methods on rotation invariance.
1752
+ Note that point sampling affects both LRF and GRF con-
1753
+ structions in our design, therefore we can only give analysis
1754
+ when considering both reference frames. We can see that
1755
+ random sampling gives the lowest model performance with
1756
+ 89.7%, with 1.3% absolute performance drop compared to
1757
+ our method using FPS. Inverse density importance sampling
1758
+ can achieve a comparable result as our method, while it is
1759
+ not strictly invariant to rotations. We argue that due to the
1760
+ information compensation between features encoded from
1761
+ LRFs and GRF, different sampling strategies will not affect
1762
+ our model performance quite much.
1763
+ Sampling Method
1764
+ RS
1765
+ FPS (ours)
1766
+ US
1767
+ IDIS
1768
+ z/z
1769
+ 89.7
1770
+ 91.0
1771
+ 90.2
1772
+ 90.6
1773
+ z/SO(3)
1774
+ 89.7
1775
+ 91.0
1776
+ 90.2
1777
+ 90.6
1778
+ SO(3)/SO(3)
1779
+ 89.7
1780
+ 91.0
1781
+ 90.2
1782
+ 90.6
1783
+ Table 2: Classification results (%) on ModelNet40 with dif-
1784
+ ferent re-sampling techniques.
1785
+ Visualization of U.
1786
+ To better present the discriminabil-
1787
+ ity of the learned features, we summarize the shape fea-
1788
+ ture representation U by maxpooling and visualize it via t-
1789
+ SNE (Van der Maaten and Hinton 2008). Experiments are
1790
+ conducted on object classification under z/z and z/SO(3).
1791
+ Only the first 16 classes are selected for a clear represen-
1792
+ tation purpose as shown in Fig. 3. Although it is difficult to
1793
+ correctly separate all categories, we can see that some shape
1794
+ classes can be perfectly predicted, and the overall represen-
1795
+ tation ability of U under different testing protocols is satis-
1796
+ factory and consistent.
1797
+ Figure 3: t-SNE of the aggregated U with z/SO(3) (Left) and
1798
+ SO(3)/SO(3) (Right). Clusters indicate good predictions in
1799
+ object classification.
1800
+ Constructions of pℓ
1801
+ ij.
1802
+ We examine the model performance
1803
+ when using different methods to construct the local rotation-
1804
+ invariant feature pℓ
1805
+ ij. Specifically, in addition to the proposed
1806
+ Method
1807
+ PPFs
1808
+ LRFs (Ours)
1809
+ Acc. (%)
1810
+ 89.3
1811
+ 91.0
1812
+ Table 3: Classification results (%) on ModelNet40 with
1813
+ z/SO(3).
1814
+ method that builds pℓ
1815
+ ij based on LRFs, we examine point-
1816
+ pair features (PPFs) to build pℓ
1817
+ ij following (Deng, Birdal,
1818
+ and Ilic 2018). As reported in Table 3, we find that the model
1819
+ performance of using PPFs is lower than our LRF-based
1820
+ method. The reason is that point positions, which provide
1821
+ information of exact shape of the 3D objects, are important
1822
+ for shape learning. However, point-pair features give infor-
1823
+ mation about the topology of a 3D shape, and different 3D
1824
+ shapes can have the same topology, which introduces diffi-
1825
+ culties for exact 3D shape learning.
1826
+ Model Complexity.
1827
+ Inference model sizes of different
1828
+ methods along with the corresponding construction time
1829
+ for LRFs and inference speed are reported in Table 4.
1830
+ The construction time measured in seconds (s) shows time
1831
+ cost for different models generating their low-level rotation-
1832
+ invariant shape features, where we record the total time
1833
+ for local and global representation constructions of RI-
1834
+ Framework and our work. VN-DGCNN does not compute
1835
+ the rotation-invariant shape features, therefore no result can
1836
+ be reported. The inference speed with the unit of number of
1837
+ instances evaluated within one second (ins./s) is measured
1838
+ for each method with a batch size of 1. When computing the
1839
+ inference speed, the amount of time for low-level rotation-
1840
+ invariant feature construction of methods (Li et al. 2021b;
1841
+ Kim, Park, and Han 2020; Zhang et al. 2019; Li et al. 2021a)
1842
+ is also considered. Table 4 shows that our method only needs
1843
+ a relatively short construction time for both LRFs and GRF.
1844
+ Meanwhile, the trade-off between the accuracy and infer-
1845
+ ence speed is hard to balance. We will investigate the model
1846
+ design for a much high accuracy and faster speeds in the
1847
+ future work.
1848
+ Sign Ambiguity. As mentioned in the main work, we propose simple techniques to address the sign ambiguity issue introduced by eigenvalue decomposition when computing the LRFs and GRF. We thus examine the model performance with no sign disambiguation techniques applied; the results are reported in Table 5. It can be seen that sign ambiguity negatively affects the model performance: accuracy drops by 0.7% and 0.9% when uncertainty of vector directions is introduced into model training. With our proposed solutions, the model behavior is stabilized and hence the classification accuracy increases.
+
+ Method   | no@Mℓ | no@Mg | no@Mℓ and Mg
+ Acc. (%) | 90.3  | 90.1  | 89.8
+
+ Table 5: Classification results (%) on ModelNet40 with z/SO(3), where “no@” denotes no sign disambiguation technique applied.
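+ For illustration, the following Python sketch shows one common sign-disambiguation convention for eigenvectors obtained from an eigenvalue decomposition (the exact rule used in the main work may differ; this only illustrates the idea): each eigenvector is flipped so that the majority of local point offsets projects non-negatively onto it, removing the arbitrary ±1 freedom:
+
+ import numpy as np
+
+ def disambiguate_frame(neighbors, center):
+     offsets = neighbors - center                 # (k, 3) local offsets
+     cov = offsets.T @ offsets                    # 3x3 covariance matrix
+     _, vecs = np.linalg.eigh(cov)                # columns are eigenvectors
+     for i in range(3):
+         if np.sum(offsets @ vecs[:, i]) < 0:     # sign vote over the neighbors
+             vecs[:, i] = -vecs[:, i]             # deterministic orientation
+     return vecs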
+ Rotational Effect of Mg. As mentioned in (Li et al. 2021a), different ways to ensure that Mg is a valid rotation matrix result in different model performances. Here we examine four methods to make Mg a valid rotation: (a) we randomly permute two basis vectors regardless of the S value; (b) we randomly negate a basis vector regardless of the S value; (c) we permute the two basis vectors whose S values are the smallest two; and (d) we simply reverse the direction of the basis vector whose S value is the smallest, which is the method used in our implementation. Table 6 shows that our simple design achieves the highest accuracy, while all the alternatives decrease the model performance, which shows the effectiveness of the proposed method.
+
+ Method   | a    | b    | c    | d (ours)
+ Acc. (%) | 90.1 | 90.1 | 90.5 | 91.0
+
+ Table 6: Classification results (%) on ModelNet40 with z/SO(3).
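+ Method (d) is straightforward to implement. The sketch below (ours; variable names are illustrative) flips the basis vector with the smallest S value whenever the eigenbasis is a reflection, so that Mg becomes a proper rotation:
+
+ import numpy as np
+
+ def make_valid_rotation(Mg, S):
+     if np.linalg.det(Mg) < 0:           # reflection, not a rotation
+         j = int(np.argmin(S))           # column with the smallest S value
+         Mg[:, j] = -Mg[:, j]            # reverse its direction
+     return Mg                           # now det(Mg) = +1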
+ 3D Semantic Segmentation. To check our model's effectiveness on real-world large scenes, additional experiments are conducted on the S3DIS dataset (Armeni et al. 2016), which includes six indoor areas of three different buildings. Each point is labeled with one of 13 categories (e.g., ceiling, chair, or clutter). Following the same pre-processing steps as (Qi et al. 2017b; Wang et al. 2019), each room is divided into 1m×1m blocks, and 4096 points are sampled per block during training. We use area-5 for testing and all other areas for training. The quantitative results are shown in Table 7, following (Zhao et al. 2022): under random rotations, our model outperforms LGR-Net by 7.8%, showing a more effective way to process large indoor scenes. For a more intuitive understanding of the model performance, qualitative results are shown in Fig. 4 for reference.
+
+ Method                     | z/z  | z/SO(3) | SO(3)/SO(3)
+ PointNet (Qi et al. 2017a) | 41.1 | 4.1     | 29.3
+ DGCNN (Wang et al. 2019)   | 48.4 | 3.6     | 34.3
+ RIConv (Zhang et al. 2019) | 22.0 | 22.0    | 22.0
+ LGR-Net (Zhao et al. 2022) | 43.4 | 43.4    | 43.4
+ Ours                       | 51.2 | 51.2    | 51.2
+
+ Table 7: Semantic segmentation results (mIoU) on S3DIS area-5.
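+ As a rough illustration of this pre-processing (a simplified sketch of the standard PointNet-style pipeline, not the authors' exact code), a room can be split into blocks as follows:
+
+ import numpy as np
+
+ def room_to_blocks(points, num_point=4096, block_size=1.0):
+     blocks = []
+     xy_min = points[:, :2].min(axis=0)
+     grid = np.floor((points[:, :2] - xy_min) / block_size).astype(int)
+     for cell in np.unique(grid, axis=0):         # each occupied 1m x 1m cell
+         idx = np.where(np.all(grid == cell, axis=1))[0]
+         if idx.size == 0:
+             continue
+         choice = np.random.choice(idx, num_point, replace=idx.size < num_point)
+         blocks.append(points[choice])            # 4096 points per block
+     return np.stack(blocks)                      # (num_blocks, 4096, C)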
+ 3D Part Segmentation. For visualization purposes (see Fig. 4 in the main work), as well as for a detailed analysis of model behavior for each category, we report the per-class mIoU accuracies under z/SO(3) and SO(3)/SO(3) in Tables 8 and 9, where bold numbers indicate the best results for each category. As per-class mIoU scores are not reported in VN-DGCNN (Deng et al. 2021), we follow the official implementation¹ and report per-class mIoU results in both tables. However, the reproduced results of VN-DGCNN are much lower than the ones reported in their work, and our model achieves better segmentation results than VN-DGCNN (Deng et al. 2021) for most categories. Our model also outperforms the state-of-the-art methods (Zhao et al. 2022; Luo et al. 2022) in several classes (e.g., airplane, chair, and table) under different testing conditions. Furthermore, our model performance is consistent across all categories when tested under different rotations. In addition, we present more qualitative examples in Fig. 5, covering all 16 classes. For each class, we show two pairs of ground truth and predicted samples. Although errors occur where the boundary between different parts differs only marginally, our model achieves strong performance for most classes.
+
+ ¹ https://github.com/FlyingGiraffe/vnn-pc
+ Method                         | mIoU | airplane | bag  | cap  | car  | chair | earphone | guitar | knife | lamp | laptop | motorbike | mug  | pistol | rocket | skateboard | table
+ PointNet (Qi et al. 2017a)     | 37.8 | 40.4     | 48.1 | 46.3 | 24.5 | 45.1  | 39.4     | 29.2   | 42.6  | 52.7 | 36.7   | 21.2      | 55.0 | 29.7   | 26.6   | 32.1       | 35.8
+ PointNet++ (Qi et al. 2017b)   | 48.3 | 51.3     | 66.0 | 50.8 | 25.2 | 66.7  | 27.7     | 29.7   | 65.6  | 59.7 | 70.1   | 17.2      | 67.3 | 49.9   | 23.4   | 43.8       | 57.6
+ PCT (Guo et al. 2021)          | 38.5 | 32.2     | 44.8 | 36.3 | 26.1 | 36.2  | 40.2     | 48.1   | 42.1  | 54.0 | 40.9   | 18.7      | 50.5 | 25.6   | 27.7   | 44.7       | 47.6
+ RIConv (Zhang et al. 2019)     | 75.3 | 80.6     | 80.0 | 70.8 | 68.8 | 86.8  | 70.3     | 87.3   | 84.7  | 77.8 | 80.6   | 57.4      | 91.2 | 71.5   | 52.3   | 66.5       | 78.4
+ GCAConv (Zhang et al. 2020)    | 77.2 | 80.9     | 82.6 | 81.0 | 70.2 | 88.4  | 70.6     | 87.1   | 87.2  | 81.8 | 78.9   | 58.7      | 91.0 | 77.9   | 52.3   | 66.8       | 80.3
+ RI-Framework (Li et al. 2021b) | 79.2 | 81.4     | 82.3 | 86.3 | 75.3 | 88.5  | 72.8     | 90.3   | 82.1  | 81.3 | 81.9   | 67.5      | 92.6 | 75.5   | 54.8   | 75.1       | 78.9
+ LGR-Net (Zhao et al. 2022)     | 80.0 | 81.5     | 80.5 | 81.4 | 75.5 | 87.4  | 72.6     | 88.7   | 83.4  | 83.1 | 86.8   | 66.2      | 92.9 | 76.8   | 62.9   | 80.0       | 80.0
+ VN-DGCNN⋆ (Deng et al. 2021)   | 75.3 | 81.1     | 74.8 | 72.9 | 73.8 | 87.8  | 55.9     | 91.4   | 83.8  | 80.2 | 84.4   | 44.5      | 92.8 | 74.6   | 57.2   | 70.2       | 78.9
+ OrientedMP (Luo et al. 2022)   | 80.1 | 81.7     | 79.0 | 85.0 | 78.1 | 89.7  | 76.5     | 91.6   | 85.9  | 81.6 | 82.1   | 67.6      | 95.0 | 79.6   | 64.4   | 76.9       | 80.7
+ Ours                           | 80.3 | 84.5     | 82.7 | 83.9 | 76.6 | 90.2  | 76.1     | 91.6   | 86.6  | 83.5 | 84.6   | 50.1      | 94.4 | 81.9   | 60.3   | 75.3       | 81.8
+
+ Table 8: Segmentation results of class-wise and averaged mIoU on ShapeNetPart under z/SO(3), where ⋆ means our reproduced results of VN-DGCNN using the official code.
+ Method                         | mIoU | airplane | bag  | cap  | car  | chair | earphone | guitar | knife | lamp | laptop | motorbike | mug  | pistol | rocket | skateboard | table
+ PointNet (Qi et al. 2017a)     | 74.4 | 81.6     | 68.7 | 74.0 | 70.3 | 87.6  | 68.5     | 88.9   | 80.0  | 74.9 | 83.6   | 56.5      | 77.6 | 75.2   | 53.9   | 69.4       | 79.9
+ PointNet++ (Qi et al. 2017b)   | 76.7 | 79.5     | 71.6 | 87.7 | 70.7 | 88.8  | 64.9     | 88.8   | 78.1  | 79.2 | 94.9   | 54.3      | 92.0 | 76.4   | 50.3   | 68.4       | 81.0
+ PCT (Guo et al. 2021)          | 75.2 | 80.1     | 69.0 | 82.5 | 66.8 | 88.4  | 69.4     | 90.4   | 85.3  | 81.8 | 79.6   | 39.9      | 89.2 | 76.5   | 51.8   | 72.6       | 80.0
+ RIConv (Zhang et al. 2019)     | 75.5 | 80.6     | 80.2 | 70.7 | 68.8 | 86.8  | 70.4     | 87.2   | 84.3  | 78.0 | 80.1   | 57.3      | 91.2 | 71.3   | 52.1   | 66.6       | 78.5
+ GCAConv (Zhang et al. 2020)    | 77.3 | 81.2     | 82.6 | 81.6 | 70.2 | 88.6  | 70.6     | 86.2   | 86.6  | 81.6 | 79.6   | 58.9      | 90.8 | 76.8   | 53.2   | 67.2       | 81.6
+ RI-Framework (Li et al. 2021b) | 79.4 | 81.4     | 84.5 | 85.1 | 75.0 | 88.2  | 72.4     | 90.7   | 84.4  | 80.3 | 84.0   | 68.8      | 92.6 | 76.1   | 52.1   | 74.1       | 80.0
+ LGR-Net (Zhao et al. 2022)     | 80.1 | 81.7     | 78.1 | 82.5 | 75.1 | 87.6  | 74.5     | 89.4   | 86.1  | 83.0 | 86.4   | 65.3      | 92.6 | 75.2   | 64.1   | 79.8       | 80.5
+ VN-DGCNN⋆ (Deng et al. 2021)   | 74.7 | 80.0     | 79.4 | 79.1 | 71.5 | 89.2  | 66.1     | 89.0   | 83.5  | 80.6 | 82.0   | 29.3      | 91.4 | 73.4   | 51.5   | 67.8       | 81.0
+ OrientedMP (Luo et al. 2022)   | 80.9 | 81.8     | 78.8 | 85.4 | 78.0 | 89.6  | 76.7     | 91.6   | 85.7  | 81.7 | 82.1   | 67.6      | 95.0 | 79.1   | 63.5   | 76.5       | 81.0
+ Ours                           | 80.4 | 84.3     | 82.2 | 84.6 | 77.9 | 89.9  | 76.6     | 91.3   | 86.7  | 84.1 | 84.3   | 50.1      | 93.4 | 79.0   | 63.7   | 75.3       | 82.3
+
+ Table 9: Segmentation results of class-wise and averaged mIoU on ShapeNetPart under SO(3)/SO(3).
+ References
+ Armeni, I.; Sener, O.; Zamir, A. R.; Jiang, H.; Brilakis, I.; Fischer, M.; and Savarese, S. 2016. 3D semantic parsing of large-scale indoor spaces. In CVPR.
+ Deng, C.; Litany, O.; Duan, Y.; Poulenard, A.; Tagliasacchi, A.; and Guibas, L. J. 2021. Vector neurons: A general framework for SO(3)-equivariant networks. In ICCV.
+ Deng, H.; Birdal, T.; and Ilic, S. 2018. PPFNet: Global context aware local features for robust 3D point matching. In CVPR.
+ Groh, F.; Wieschollek, P.; and Lensch, H. 2018. Flex-convolution. In ACCV.
+ Guo, M.-H.; Cai, J.-X.; Liu, Z.-N.; Mu, T.-J.; Martin, R. R.; and Hu, S.-M. 2021. PCT: Point cloud transformer. In CVM.
+ Kim, S.; Park, J.; and Han, B. 2020. Rotation-invariant local-to-global representation learning for 3D point cloud. In NeurIPS.
+ Li, F.; Fujiwara, K.; Okura, F.; and Matsushita, Y. 2021a. A closer look at rotation-invariant deep point cloud analysis. In ICCV.
+ Li, X.; Li, R.; Chen, G.; Fu, C.-W.; Cohen-Or, D.; and Heng, P.-A. 2021b. A rotation-invariant framework for deep point cloud analysis. In TVCG.
+ Luo, S.; Li, J.; Guan, J.; Su, Y.; Cheng, C.; Peng, J.; and Ma, J. 2022. Equivariant point cloud analysis via learning orientations for message passing. In CVPR.
+ Qi, C. R.; Su, H.; Mo, K.; and Guibas, L. J. 2017a. PointNet: Deep learning on point sets for 3D classification and segmentation. In CVPR.
+ Qi, C. R.; Yi, L.; Su, H.; and Guibas, L. J. 2017b. PointNet++: Deep hierarchical feature learning on point sets in a metric space. In NeurIPS.
+ Van der Maaten, L.; and Hinton, G. 2008. Visualizing data using t-SNE. In JMLR.
+ Wang, Y.; Sun, Y.; Liu, Z.; Sarma, S. E.; Bronstein, M. M.; and Solomon, J. M. 2019. Dynamic graph CNN for learning on point clouds. In ACM ToG.
+ Zhang, Z.; Hua, B.-S.; Chen, W.; Tian, Y.; and Yeung, S.-K. 2020. Global context aware convolutions for 3D point cloud understanding. In 3DV.
+ Zhang, Z.; Hua, B.-S.; Rosen, D. W.; and Yeung, S.-K. 2019. Rotation invariant convolutions for 3D point clouds deep learning. In 3DV.
+ Zhao, C.; Yang, J.; Xiong, X.; Zhu, A.; Cao, Z.; and Li, X. 2022. Rotation invariant point cloud analysis: Where local geometry meets global topology. In Pattern Recognition.
+ Figure 4: Visualization of semantic segmentation results on S3DIS area-5. The first row is the original inputs, the second row is the ground truth (GT) samples, and the last row is our predicted results. (Color legend: ceiling, floor, wall, beam, column, window, door, table, chair, sofa, bookcase, board, clutter.)
+
+ Figure 5: Segmentation comparisons between the ground truth (GT) and our model on the ShapeNetPart dataset under z/SO(3).
UNAyT4oBgHgl3EQfVvd8/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
UNE5T4oBgHgl3EQfAw5l/content/tmp_files/2301.05381v1.pdf.txt ADDED
@@ -0,0 +1,1938 @@
+ arXiv:2301.05381v1 [math.AT] 13 Jan 2023
+ A NOTE ON THE STRING TOPOLOGY BV-ALGEBRA FOR S2 WITH Z2 COEFFICIENTS
+ KATE POIRIER AND THOMAS TRADLER
+ Abstract. Luc Menichi showed that the BV algebras on H●(LS2;Z2)[−2] coming from string topology and the one on HH●(H●(S2;Z2),H●(S2;Z2)) using Poincaré duality on H●(S2;Z2) are not isomorphic. In this note we show how one can obtain the string topology BV algebra on Hochschild cohomology using a Poincaré duality structure with higher homotopies. This Poincaré duality (with higher homotopies) on cohomology is induced by a local Poincaré duality (with higher homotopies) on the cochain level.
+ 1. Main Statements
+ 1.1. BV algebras for the 2-sphere with Z2 coefficients. In [M, Theorem 24] Luc Menichi calculated the string topology BV algebra (defined by Moira Chas and Dennis Sullivan in [CS]) for the 2-sphere M = S2 with Z2 = Z/2Z coefficients to be
+ (1.2)  H●(LS2;Z2) ≅ Λa ⊗ Z2[u] ≅ ⊕_{k≥0} Z2.αk ⊕ ⊕_{k≥0} Z2.βk,
+        where αk ⋅ αℓ = αk+ℓ, βk ⋅ βℓ = 0, and αk ⋅ βℓ = βℓ ⋅ αk = βk+ℓ,
+        and ∆ST(αk) = 0 and ∆ST(βk) = k ⋅ αk−1 + k ⋅ βk+1.
+ Here, the degrees are given by ∣a∣ = −2, ∣u∣ = 1, and thus, setting αk = 1 ⊗ u^k and βk = a ⊗ u^k, we have ∣αk∣ = k, ∣βk∣ = k − 2.
+ Moreover, in [M, Proposition 20], Menichi calculated the BV algebra for Hochschild cohomology of the cohomology of S2 with Z2 coefficients to be
+ (1.3)  HH●(H●(S2;Z2);H●(S2;Z2)) ≅ Λg ⊗ Z2[f] ≅ ⊕_{k≥0} Z2.φk ⊕ ⊕_{k≥0} Z2.ψk,
+        where φk ⋅ φℓ = φk+ℓ, ψk ⋅ ψℓ = 0, and φk ⋅ ψℓ = ψℓ ⋅ φk = ψk+ℓ,
+        and ∆(φk) = 0 and ∆(ψk) = k ⋅ φk−1.
+ The degrees are similarly given by ∣g∣ = −2, ∣f∣ = 1, and so, setting φk = 1 ⊗ f^k and ψk = g ⊗ f^k, we have ∣φk∣ = k, ∣ψk∣ = k − 2.
+ Note that these BV algebras differ in their ∆ operators, and, in fact, Menichi obtained the following result.
+ Theorem 1.4. [M, Corollary 30] There is no isomorphism of BV algebras between (H●(LS2;Z2), ⋅, ∆ST) and (HH●(H●(S2;Z2);H●(S2;Z2)), ⋅, ∆).
+ It is worth noting, though, that the induced Gerstenhaber algebras on H●(LS2;Z2) and HH●(H●(S2;Z2);H●(S2;Z2)) are isomorphic; cf. [M, Corollary 23].
+ 2010 Mathematics Subject Classification. 55P50, 16E40 (primary), 57P10, 08A65 (secondary).
+ Key words and phrases. String topology, Hochschild cohomology, BV algebra, Poincaré duality.
+ 1.5. The BV algebra coming from local Poincaré duality. A crucial ingredient for determining the ∆ operator on Hochschild cohomology (1.3) comes from a choice of Poincaré duality structure given as a bimodule isomorphism F ∶ H●(S2;Z2) ≅→ H●(S2;Z2). For the (Z2-)cohomology H●(S2;Z2) and homology H●(S2;Z2), an obvious choice is to define the degree +2 isomorphism F as follows:
+
+                 degree −2   degree −1   degree 0   degree 1   degree 2
+   H●(S2;Z2) = ...  Z2          0          Z2         0          0    ...
+   H●(S2;Z2) = ...  0           0          Z2         0          Z2   ...
+
+ with F mapping each copy of Z2 in degree j of the cohomology isomorphically onto the copy of Z2 in degree j + 2 of the homology. Indeed, this choice of F, which is the map given by capping with the Z2-fundamental class of S2, leads to the BV algebra in (1.3) as we will confirm in theorem 1.6 (together with 5.3) below.
+ Note that on the cochain level, capping with a fundamental cycle is not a bimodule map; see observation 4.4. Using a generalization of bimodule maps, which, in addition to F, allows for higher homotopies, we show in this paper that one can obtain the string topology BV algebra (1.2) on Hochschild cohomology. The precise definition of bimodule maps with higher homotopies was studied in [T1] and will be reviewed in section 2 below. Applying the concept of bimodule maps with higher homotopies, we will construct a specific example of such a map for the case of a local cochain model of the 2-sphere C●(S2;Z2) in example 4.20. Then we pull this map back to obtain a bimodule map with homotopies ̃F for cohomology H●(S2;Z2) (see proposition 4.23). The resulting map ̃F, recorded in example 4.2, has precisely one higher homotopy when compared to F (see (4.3)). Now, using this ̃F, we obtain the following main theorem.
+ Theorem 1.6. The BV algebra on HH●(H●(S2;Z2);H●(S2;Z2)) induced by ̃F (coming from a local bimodule map with higher homotopies transferred to cohomology) is isomorphic to the string topology BV algebra (1.2) on H●(LS2;Z2).
+ Proof. We compute both BV algebras coming from F and ̃F, respectively. Denote by H● ∶= H●(S2;Z2) the cohomology and by H● ∶= H●(S2;Z2) the homology. Then the bimodule map F and the bimodule map ̃F (with higher homotopies) induce respective (graded module) isomorphisms F and ̃F of Hochschild cohomologies (see 5.3 and 5.4)
+ (1.7)  F, ̃F ∶ HH●(H●,H●) ≅→ HH●(H●,H●).
+ Next, we use the explicit description (as reviewed in 5.1 and 5.2) stating that
+   HH●(H●,H●) ≅ ⊕_{k≥0} Z2.φk ⊕ ⊕_{k≥0} Z2.ψk,  where ∣φk∣ = k, ∣ψk∣ = k − 2,
+   HH●(H●,H●) ≅ ⊕_{k≥0} Z2.θk ⊕ ⊕_{k≥0} Z2.χk,  where ∣θk∣ = k + 2, ∣χk∣ = k.
+ With this notation, F induces in (1.7) the map F(φk) = θk, F(ψk) = χk (see 5.3), while ̃F induces the map ̃F(φk) = θk + χk+2, ̃F(ψk) = χk (computed in 5.4).
+ The BV ∆ operator on HH●(H●,H●) is given by transferring Connes' B-operator from HH●(H●,H●) to HH●(H●,H●) via the given Poincaré duality bimodule isomorphism, where Connes' B-operator is the map (computed in 5.2):
+   B ∶ HH●(H●,H●) → HH●(H●,H●),  B(θk) = 0,  B(χk) = k ⋅ θk−1.
+ Thus the induced operators ∆ = F⁻¹ ○ B ○ F and ̃∆ = ̃F⁻¹ ○ B ○ ̃F from F and ̃F, respectively, are:
+ (1.8)  ∆(φk) = 0,  ∆(ψk) = F⁻¹(k ⋅ θk−1) = k ⋅ φk−1,
+ (1.9)  ̃∆(φk−2) = ̃∆(ψk) = ̃F⁻¹(k ⋅ θk−1) = k ⋅ (φk−1 + ψk+1),
+ where we used that ̃F⁻¹ is given by ̃F⁻¹(θk) = φk + ψk+2 and ̃F⁻¹(χk) = ψk.
+ Note that the ∆ operator in (1.8) coincides with ∆ from (1.3). To see that the BV algebra with ̃∆ is isomorphic to the string topology BV algebra (1.2), we use an isomorphism which was essentially described in [M, last paragraph of Lemma 21]: the map Θ ∶ HH●(H●,H●) → H●(LS2;Z2) with Θ(φk) = αk + k ⋅ βk+2 and Θ(ψk) = βk is an algebra isomorphism such that ∆ST ○ Θ = Θ ○ ̃∆. □
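+ The intertwining relation ∆ST ○ Θ = Θ ○ ̃∆ can also be verified mechanically. The following Python sketch (ours, not part of the paper) encodes the Z2 formulas (1.2), (1.9) and the definition of Θ above, representing formal Z2-sums as sets of symbols, and checks the relation on generators for small k:
+
+ def add(x, y):                       # mod-2 sum of formal Z2 combinations
+     return x ^ y                     # symmetric difference of symbol sets
+
+ def theta(sym):                      # Θ(φk) = αk + k·β_{k+2}, Θ(ψk) = βk
+     kind, k = sym
+     if kind == "phi":
+         return {("alpha", k)} ^ ({("beta", k + 2)} if k % 2 else set())
+     return {("beta", k)}
+
+ def delta_st(sym):                   # ∆ST from (1.2): only odd βk is hit
+     kind, k = sym
+     if kind == "beta" and k % 2:
+         return {("alpha", k - 1), ("beta", k + 1)}
+     return set()
+
+ def delta_tilde(sym):                # ̃∆ from (1.9) / section 5.4
+     kind, k = sym
+     if k % 2 == 0:
+         return set()
+     if kind == "phi":                # ̃∆(φk) = k·(φ_{k+1} + ψ_{k+3})
+         return {("phi", k + 1), ("psi", k + 3)}
+     return {("phi", k - 1), ("psi", k + 1)}
+
+ for kind in ("phi", "psi"):
+     for k in range(12):
+         lhs, rhs = set(), set()
+         for s in theta((kind, k)):
+             lhs = add(lhs, delta_st(s))
+         for s in delta_tilde((kind, k)):
+             rhs = add(rhs, theta(s))
+         assert lhs == rhs, (kind, k)
+ print("verified: Delta_ST o Theta = Theta o Delta-tilde for k < 12")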
+ 1.10. Organization of the paper. In section 2, we review basics about Hochschild cochain complexes and morphisms between them coming from bimodule maps up to higher homotopies. In section 3 we review the induced BV algebra coming from a chosen Poincaré duality structure. In section 4 we give various computations of bimodule maps (with and without higher homotopies). In particular, the construction of a bimodule map (up to certain controlled structures) for the 2-simplex coming from locality (see example 4.9) constitutes the main computational aspect of this paper, with its proof being spelled out in appendix A. In section 5 we compute the BV algebras coming from the Poincaré duality structures on cohomology of the 2-sphere from section 4, and with this complete the proof of theorem 1.6.
+ Acknowledgments. We would like to thank Mahmoud Zeinalian for discussions about this topic. The second author was partially supported by a PSC-CUNY research award.
+ 2. Bimodule maps with higher homotopies
+ In this section we review bimodule maps, bimodule maps up to higher homotopies, and homotopy inner products; see also [T1]. Moreover, we review the induced maps on Hochschild cohomology.
+ 2.1. Basic setup. Denote by R a commutative ring with unit; our main example of interest is R = Z2. Let A be a unital dg-algebra over R, where we note that all the differentials in this paper will always go down, i.e., d ∶ Aj → Aj−1. We denote by A the space A shifted up by one, i.e., Aj ∶= Aj−1.
+ Let M be a dg-bimodule over A. If we want to emphasize the corresponding algebra A, then we will also write M/A instead of M. From now on, all modules as well as module maps will be written in bold.
+ Note that if M is a dg-bimodule over A, then so is its dual space M∗ with M∗_j ∶= (M−j)∗ = HomR(M−j,R), with differential d ∶ M∗_j → M∗_{j−1}, d(n)(m) ∶= (−1)^{∣n∣+1} n(d(m)), and module maps (n.a)(m) ∶= n(a.m) and (a.n)(m) ∶= (−1)^{∣a∣⋅(∣n∣+∣m∣)} n(m.a) for n ∈ M∗, m ∈ M, a ∈ A, where ∣.∣ denotes the degree. Moreover, the dg-algebra A is itself a dg-module A ∶= A over A (with module structure given by the algebra multiplication), and thus A∗ is a dg-module over A as well.
+ Define the Hochschild cochain complex of A with values in M to be given by CH●(A,M) ∶= ∏_{r≥0} Hom(A⊗r,M), where the differential D = D0 + D1, with D² = 0, is defined for ϕ ∈ Hom(A⊗r,M) by setting
+   D0(ϕ)(a1,...,ar) ∶= d(ϕ(a1,...,ar)) + ∑_{j=1}^{r} (−1)^{∣ϕ∣+∣a1∣+⋅⋅⋅+∣aj−1∣} ⋅ ϕ(a1,...,daj,...,ar),
+   D1(ϕ)(a1,...,ar+1) ∶= (−1)^{∣ϕ∣⋅∣a1∣} ⋅ a1.ϕ(a2,...,ar+1) + ∑_{j=1}^{r} (−1)^{∣ϕ∣+∣a1∣+⋅⋅⋅+∣aj∣} ⋅ ϕ(a1,...,aj ⋅ aj+1,...,ar+1) + (−1)^{∣ϕ∣+1+∣a1∣+⋅⋅⋅+∣ar∣} ⋅ ϕ(a1,...,ar).ar+1.
+ We will mainly use the normalized Hochschild cochain complex CH̄●(A,M), which is the subcomplex of CH●(A,M) consisting of those ϕ ∈ CH●(A,M) which vanish when any of the inputs is the unit 1 ∈ A. The inclusion CH̄●(A,M) ↪ CH●(A,M) is a quasi-isomorphism; see [L, 1.5.7].
+ 2.2. Inner products and homotopy inner products. Let M and N be dg-bimodules over A, and let F ∶ M → N be a dg-bimodule map. Then there is an induced cochain map CH(F) ∶ CH̄●(A,M) → CH̄●(A,N), ϕ ↦ F ○ ϕ. An inner product on M is a dg-bimodule map F ∶ M → M∗.
+ We will need a more general notion of inner product F ∶ M → M∗ which also allows for higher homotopies. To this end, consider a sequence of maps F = {Fp,q ∶ A⊗p ⊗ M ⊗ A⊗q → M∗}p,q≥0, and define its differential by setting DF = {(DF)p,q}p,q≥0 to be (DF)p,q = (D0F)p,q + (D1F)p,q, where, for all a1,...,ap,b1,...,bq ∈ A and m,n ∈ M:
+ (2.3)  (D0F)p,q(a1,...,ap;m;b1,...,bq)(n)
+        ∶= ∑_{j=1}^{p} (−1)^{∣F∣+∣a1∣+⋅⋅⋅+∣aj−1∣} ⋅ Fp,q(a1,...,daj,...,ap;m;b1,...,bq)(n)
+        + (−1)^{∣F∣+∣a1∣+⋅⋅⋅+∣ap∣+1} ⋅ Fp,q(a1,...,ap;dm;b1,...,bq)(n)
+        + ∑_{j=1}^{q} (−1)^{∣F∣+∣a1∣+⋅⋅⋅+∣ap∣+∣m∣+∣b1∣+⋅⋅⋅+∣bj−1∣} ⋅ Fp,q(a1,...,ap;m;b1,...,dbj,...,bq)(n)
+        + (−1)^{∣F∣+∣a1∣+⋅⋅⋅+∣ap∣+∣m∣+∣b1∣+⋅⋅⋅+∣bq∣+1} ⋅ Fp,q(a1,...,ap;m;b1,...,bq)(dn)
+ and, using the notation F−1,q = Fp,−1 = 0:
+ (2.4)  (D1F)p,q(a1,...,ap;m;b1,...,bq)(n)
+        ∶= (−1)^{∣F∣+∣a1∣⋅(∣a2∣+⋅⋅⋅+∣ap∣+∣m∣+∣b1∣+⋅⋅⋅+∣bq∣+∣n∣)} ⋅ Fp−1,q(a2,...,ap;m;b1,...,bq)(n.a1)
+        + ∑_{j=1}^{p−1} (−1)^{∣F∣+∣a1∣+⋅⋅⋅+∣aj∣} ⋅ Fp−1,q(a1,...,aj ⋅ aj+1,...,ap;m;b1,...,bq)(n)
+        + (−1)^{∣F∣+∣a1∣+⋅⋅⋅+∣ap−1∣+1} ⋅ Fp−1,q(a1,...,ap−1;ap.m;b1,...,bq)(n)
+        + (−1)^{∣F∣+∣a1∣+⋅⋅⋅+∣ap∣+∣m∣} ⋅ Fp,q−1(a1,...,ap;m.b1;b2,...,bq)(n)
+        + ∑_{j=1}^{q−1} (−1)^{∣F∣+∣a1∣+⋅⋅⋅+∣ap∣+∣m∣+∣b1∣+⋅⋅⋅+∣bj∣} ⋅ Fp,q−1(a1,...,ap;m;b1,...,bj ⋅ bj+1,...,bq)(n)
+        + (−1)^{∣F∣+∣a1∣+⋅⋅⋅+∣ap∣+∣m∣+∣b1∣+⋅⋅⋅+∣bq−1∣+1} ⋅ Fp,q−1(a1,...,ap;m;b1,...,bq−1)(bq.n)
+ Then we call F a homotopy inner product for M if DF = 0. By slight abuse of notation, we will still use the notation F ∶ M → M∗ for F = {Fp,q}p,q with all its homotopies.
+ We will sometimes depict evaluations of F as follows:
+ (2.5)  Fp,q(a1,...,ap;m;b1,...,bq)(n)  [depicted as a tree diagram with inputs a1, a2, ..., ap, m, b1, b2, ..., bq and output n]
+ Note that a homotopy inner product F = {Fp,q}p,q with Fp,q = 0 for p + q > 0 is precisely an inner product, i.e., a dg-bimodule map F0,0 ∶ M → M∗.
+ Let F be a homotopy inner product for M. We will always assume that F vanishes when any of the algebra inputs is the unit 1 ∈ A. Then there is an induced map F ∶= CH(F) ∶ CH̄●(A,M) → CH̄●(A,M∗) by setting F = CH(F) = ∑_{p,q≥0} CH(F)p,q, where CH(F)p,q ∶ Hom(A⊗r,M) → Hom(A⊗(p+r+q),M∗) is
+ (2.6)  (CH(F)p,q(ϕ))(a1,...,ap+r+q) ∶= (−1)^{∣ϕ∣⋅(∣a1∣+⋅⋅⋅+∣ap∣)} ⋅ Fp,q(a1,...,ap;ϕ(ap+1,...,ap+r);ap+r+1,...,ap+r+q).
+ A direct but lengthy calculation shows that
+ (2.7)  CH(DF)(ϕ) = D ○ CH(F)(ϕ) − (−1)^{∣F∣} ⋅ CH(F) ○ D(ϕ),  for all ϕ ∈ CH●(A,M).
+ Corollary 2.8. If F is a homotopy inner product, i.e., DF = 0, then F = CH(F) ∶ CH̄●(A,M) → CH̄●(A,M∗) is a cochain map. By abuse of notation, we often write F for the induced map on Hochschild cohomology. If F = DF′ for some F′, then the induced map on Hochschild cohomology vanishes, i.e., HH(F) = 0.
+ 2.9. Pullback under a dg-morphism. Let A and B be two unital dg-algebras, and let f ∶ B → A be a dg-algebra map (which is necessarily of degree 0). Then any dg-bimodule M = M/A over A induces a dg-bimodule M/B ∶= M over B via the module structure b.m ∶= f(b).m and m.b ∶= m.f(b) for any m ∈ M, b ∈ B. Moreover, any dg-bimodule map F/A ∶ M/A → N/A also induces a dg-bimodule map F/B ∶ M/B → N/B. Moreover, a homotopy inner product F/A for M/A also induces a homotopy inner product for M/B via
+   (F/B)p,q(b1,...,bp;m;c1,...,cq)(n) ∶= (F/A)p,q(f(b1),...,f(bp);m;f(c1),...,f(cq))(n)
+ for all b1,...,bp,c1,...,cq ∈ B and m,n ∈ M/B.
+ Moreover, for a dg-algebra map f ∶ B → A, we have the dg-modules B/B and B∗/B, and, from A/A and A∗/A, we also get A/B and A∗/B. The dg-algebra map f then induces a dg-bimodule map f/B ∶ B/B → A/B and, by dualizing, the dg-bimodule map f∗/B ∶ A∗/B → B∗/B.
+ Combining the last two paragraphs, assume that f ∶ B → A is a dg-algebra map, and that F/A ∶ A/A → A∗/A is a homotopy inner product for A. Then we get a transferred homotopy inner product f(F)/B ∶ B/B → B∗/B for B, given by the composition f(F)/B ∶= f∗/B ○ F/B ○ f/B, i.e.,
+ (2.10)  (f(F)/B)p,q(b1,...,bp;m;c1,...,cq)(n) = (F/A)p,q(f(b1),...,f(bp);f(m);f(c1),...,f(cq))(f(n)),
+ where b1,...,bp,m,c1,...,cq,n ∈ B.
+ Finally, for a dg-algebra map f ∶ B → A and a dg-bimodule M/A, there is an induced cochain map on Hochschild cochains, CH(f,M) ∶ CH̄●(A,M/A) → CH̄●(B,M/B),
+ (2.11)  CH(f;M)(ϕ)(b1,...,br) ∶= ϕ(f(b1),...,f(br)),  for all ϕ ∈ CH̄●(A,M/A), b1,...,br ∈ B.
+ With this we get the following lemma.
+ Lemma 2.12. If f ∶ B → A is a dg-algebra map, and F/A ∶ A/A → A∗/A is a homotopy inner product for A/A, then the following diagram (written here as equations) commutes:
+ (2.13)  CH(f(F)/B) = CH(f∗/B) ○ CH(F/B) ○ CH(f/B) ∶ CH̄●(B,B/B) → CH̄●(B,B∗/B),
+         and  CH(F/B) ○ CH(f,A) = CH(f,A∗) ○ CH(F/A) ∶ CH̄●(A,A/A) → CH̄●(B,A∗/B).
+ 2.14. Isomorphism on Hochschild cohomology. A particular case of interest is when a homotopy inner product F ∶ A → A∗ induces an isomorphism on Hochschild cohomology F ∶= HH(F) ∶ HH●(A,A) → HH●(A,A∗). In this case, we can transfer any structure between these Hochschild cohomologies; in particular, we can transfer the B operator as it was done in theorem 1.6.
+ Note that in equation (2.13), if the four horizontal maps are quasi-isomorphisms, then, obviously, the right vertical map CH(F/A) is a quasi-isomorphism iff the left vertical map CH(f(F)/B) is a quasi-isomorphism.
+ 3. BV algebra on Hochschild cohomology
+ We now review how homotopy inner products induce a BV algebra on Hochschild cohomology; see theorem 3.7. Our main reference for this is [T2]. We start by defining two operators B and ∆F.
+ Definition 3.1. Consider a unital dg-algebra A.
+ (1) We define Connes' B-operator (or more precisely the dual of Connes' B-operator) to be B ∶ CH̄●(A,A∗) → CH̄●(A,A∗), given for ϕ ∈ Hom(A⊗r,A∗) by B(ϕ) ∈ Hom(A⊗(r−1),A∗) with
+ (3.2)  (B(ϕ))(a1,...,ar−1)(ar) ∶= ∑_{j=1}^{r} (−1)^{(∣aj∣+⋅⋅⋅+∣ar∣)⋅(∣a1∣+⋅⋅⋅+∣aj−1∣)+∣ar∣} ⋅ ϕ(aj,...,ar,a1,...,aj−1)(1).
+ A direct but lengthy computation shows that D ○ B(ϕ) = −B ○ D(ϕ). By abuse of notation, we denote the induced map on Hochschild cohomology by B ∶ HH●(A,A∗) → HH●(A,A∗) as well.
+ (2) Next, assume that we also have a homotopy inner product F = {Fp,q ∶ A⊗p ⊗ A ⊗ A⊗q → A∗}p,q≥0. Then we define the operator ZF ∶= ∑_{p,q≥0} ZF_{p,q} ∶ CH̄●(A,A) → CH̄●(A,A∗), where, for ϕ ∈ Hom(A⊗r,A), we set ZF_{p,q}(ϕ) ∈ Hom(A⊗(p+r+q−1),A∗) to be given by
+ (3.3)  (ZF_{p,q}(ϕ))(a1,...,ap+r+q−1)(ap+r+q)
+        ∶= ∑_{j=p+1}^{p+q} (−1)^{∣F∣+(∣ϕ∣+1)⋅(∣a1∣+⋅⋅⋅+∣aj−1∣)+1} ⋅ Fp,q(a1,...,ap;1;ap+1,...,aj−1,ϕ(aj,...,aj+r−1),aj+r,...,ap+r+q−1)(ap+r+q)
+        + ∑_{j=1}^{r} (−1)^{(∣aj∣+⋅⋅⋅+∣ap+r+q∣)⋅(∣a1∣+⋅⋅⋅+∣aj−1∣)+∣ap+r+q∣+∣ϕ∣⋅(∣aj∣+⋅⋅⋅+∣aj+p+q−1∣)} ⋅ Fp,q(aj,...,aj+p−1;1;aj+p,...,aj+p+q−1)(ϕ(ap+j+q,...,ap+r+q,a1,...,aj−1))
+        + ∑_{j=1}^{p} (−1)^{∣F∣+(∣ϕ∣+1)⋅(∣a1∣+⋅⋅⋅+∣aj−1∣)+1} ⋅ Fp,q(a1,...,aj−1,ϕ(aj,...,aj+r−1),aj+r,...,ap+r−1;1;ap+r,...,ap+r+q−1)(ap+r+q)
+ One can check again that D ○ ZF(ϕ) = −(−1)^{∣F∣} ⋅ ZF ○ D(ϕ). By abuse of notation, we denote the induced map on Hochschild cohomology by ZF ∶ HH●(A,A) → HH●(A,A∗) as well. We remark that ZF has appeared in [T2, lemma 17] as the operation associated to the symbol (−1)^µ ⋅ [tree symbol omitted].
+ (3) Let F be a homotopy inner product for A. Assume, moreover, that F ∶ A → A∗ induces an isomorphism on Hochschild cohomology, F ∶= HH(F) ∶ HH●(A,A) → HH●(A,A∗). Then denote by ∆F ∶ HH●(A,A) → HH●(A,A) the composition ∆F ∶= F⁻¹ ○ ZF, i.e., HH●(A,A) —ZF→ HH●(A,A∗) —F⁻¹→ HH●(A,A).
+ Lemma 3.4. Let A be a unital dg-algebra.
+ (1) On the normalized Hochschild cochain complex CH̄●(A,A∗), we have that B² = 0.
+ (2) Assume that F ∶ A → A∗ is a homotopy inner product which induces an isomorphism on Hochschild cohomology F ∶ HH●(A,A) → HH●(A,A∗). Then the deviation of ∆F from being a derivation of the cup product is the usual Gerstenhaber bracket. Here the cup product on Hochschild cohomology is given for Hochschild cochains ϕ ∈ Hom(A⊗r,A) and ρ ∈ Hom(A⊗s,A) to be ϕ ⌣ ρ ∈ Hom(A⊗(r+s),A) with
+ (3.5)  (ϕ ⌣ ρ)(a1,...,ar+s) ∶= (−1)^{∣ρ∣⋅(∣a1∣+⋅⋅⋅+∣ar∣)} ⋅ ϕ(a1,...,ar) ⋅ ρ(ar+1,...,ar+s).
+ Proof. For (1), note that B² = 0 follows since normalized Hochschild cochains vanish when any input is the unit 1. Part (2) was proved in [T2, section 3.3]. □
+ Since lemma 3.4 (1) and (2) are conditions needed for a BV algebra, i.e., a square-zero operator (here: B) whose deviation from being a derivation is a Gerstenhaber bracket (here: ∆F), we make the following definition.
+ Definition 3.6. Let A be a dg-algebra, and let F ∶ A → A∗ be a homotopy inner product for A. We call F a Poincaré duality structure for A if it satisfies the following two conditions:
+ (1) F induces an isomorphism of graded modules on Hochschild cohomology F = HH(F) ∶ HH●(A,A) ≅→ HH●(A,A∗).
+ (2) Transferring B from HH●(A,A∗) to HH●(A,A) via F equals ∆F, i.e., F⁻¹ ○ B ○ F = ∆F = F⁻¹ ○ ZF.
+ With this, we obtain the following theorem, which is [T2, theorem 2].
+ Theorem 3.7. Let A be a dg-algebra, and let F ∶ A → A∗ be a Poincaré duality structure for A. Then HH●(A,A) together with the cup product and ∆F is a BV algebra.
+ Examples 3.8. We now give a few examples for how one can check whether a homotopy inner product is a Poincaré duality structure.
+ (1) Let F be an inner product for a dg-algebra A (i.e., the only non-zero component is F0,0), and assume that F induces an isomorphism on Hochschild cohomology. Then F is a Poincaré duality structure. (To see this, we note that this is a special case of the next example (2) below, since the condition (D1F)0,1 = 0 gives (using (2.4)) that F(mb)(n) = F(m)(bn), and (D1F)1,0 = 0 gives F(am)(n) = (−1)^{∣a∣⋅(∣m∣+∣n∣)}F(m)(na) for all m,n,a,b ∈ A, and thus F(m)(n) = F(1 ⋅ m)(n) = F(1)(m ⋅ n) = (−1)^{∣m∣⋅∣n∣}F(n ⋅ 1)(m) = (−1)^{∣m∣⋅∣n∣}F(n)(m).)
+ (2) Let F be a homotopy inner product for a dg-algebra A, and assume that F induces an isomorphism on Hochschild cohomology. Assume further that F is invariant under cyclic rotation of the first p + 1 and last q + 1 inputs, i.e., for all p,q ≥ 0 and all a1,...,ap,m,b1,...,bq,n ∈ A:
+   Fp,q(a1,...,ap;m;b1,...,bq)(n) = (−1)^ε ⋅ Fq,p(b1,...,bq;n;a1,...,ap)(m),
+ where (−1)^ε = (−1)^{(∣a1∣+⋅⋅⋅+∣ap∣+∣m∣)⋅(∣b1∣+⋅⋅⋅+∣bq∣+∣n∣)}. Then F is a Poincaré duality structure. This was proved in [T2, lemma 17].
+ (3) In the examples in section 5 of this paper we will check if a given homotopy inner product F is a Poincaré duality structure or not by explicitly computing F, B and ZF; cf. the examples given in 5.3, 5.4 and 5.5. In particular, it is worth noting that there exist homotopy inner products F for which F is an isomorphism, but for which F⁻¹ ○ B ○ F ≠ ∆F. We provide an explicit example for such a homotopy inner product in 5.5.
+ 4. Computations of bimodule maps with higher homotopies
+ In this section, we will give explicit bimodule maps and bimodule maps with higher homotopies for specific dg-algebras. In particular, we will compute the homotopy inner product on H●(S2;Z2) coming from a pullback of a local homotopy inner product. In the remainder of this paper –with the exception of observation 4.4– the ground ring will always be R = Z2.
+ Example 4.1 (H●(S2;Z2) inner product without homotopies). Consider the dg-algebra A ∶= H●(S2;Z2) ≅ Z2.e ⊕ Z2.s with zero differential and degrees ∣e∣ = 0 and ∣s∣ = −2, and where e is the unit and s ⋅ s = 0. For the dg-module and dual dg-module of A we use the notation A ≅ Z2.e ⊕ Z2.s and A∗ ≅ Z2.e∗ ⊕ Z2.s∗, respectively, where e∗ and s∗ are the duals of e and s with ∣e∗∣ = 0 and ∣s∗∣ = 2, and the module structure is given by e.e∗ = e∗.e = e∗, e.s∗ = s∗.e = s∗, s.e∗ = e∗.s = 0, s.s∗ = s∗.s = e∗ (see 2.1).
+ Define the dg-bimodule map F ∶ A → A∗ by F(s) = e∗ and F(e) = s∗:
+
+        degree:  −2    −1    0     1     2
+   A  = ...     ⟨s⟩    0    ⟨e⟩    0     0    ...
+   A∗ = ...      0     0   ⟨e∗⟩    0   ⟨s∗⟩   ...
+
+ with F mapping ⟨s⟩ to ⟨e∗⟩ and ⟨e⟩ to ⟨s∗⟩. Thus, F ∶ A → A∗ is the map F(s)(e) = 1, F(s)(s) = 0, F(e)(e) = 0, F(e)(s) = 1 (in other words, F is given by capping with s∗). One can check directly that F is a dg-bimodule map. Note that when interpreting F as an inner product ⟨,⟩ = F ∶ A ⊗ A → Z2, the only non-vanishing inner products are ⟨s,e⟩ = 1 and ⟨e,s⟩ = 1.
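+ The claim that F is a dg-bimodule map can also be confirmed by a brute-force check over the Z2 basis. The following Python sketch (ours, added for illustration, not part of the paper) verifies F(a.m)(n) = (a.F(m))(n) and F(m.a)(n) = (F(m).a)(n) using the module structure on A∗ from 2.1 (signs are irrelevant over Z2):
+
+ BASIS = ("e", "s")
+
+ def mul(x, y):                        # product in H●(S2; Z2)
+     if x == "e": return {y}
+     if y == "e": return {x}
+     return set()                      # s·s = 0
+
+ def F(m):                             # F(e) = s*, F(s) = e*
+     return {"s*"} if m == "e" else {"e*"}
+
+ def pair(phi, m):                     # evaluate a Z2 sum of duals on m
+     return sum(1 for d in phi if d == m + "*") % 2
+
+ for a in BASIS:
+     for m in BASIS:
+         for n in BASIS:
+             # left action: (a.F(m))(n) = F(m)(n·a), cf. 2.1
+             lhs = sum(pair(F(x), n) for x in mul(a, m)) % 2
+             rhs = sum(pair(F(m), x) for x in mul(n, a)) % 2
+             assert lhs == rhs
+             # right action: (F(m).a)(n) = F(m)(a·n), cf. 2.1
+             lhs = sum(pair(F(x), n) for x in mul(m, a)) % 2
+             rhs = sum(pair(F(m), x) for x in mul(a, n)) % 2
+             assert lhs == rhs
+ print("F is a Z2 bimodule map on H●(S2; Z2)")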
+ Example 4.2 (H●(S2;Z2) inner product with homotopies). We next consider the same algebra A and module structures A and A∗ as in example 4.1, but we define an inner product ̃F for A with higher homotopies (in the sense of 2.2). In fact, ̃F has its only non-zero components given by ̃F0,0 ∶ A → A∗ and ̃F2,0 ∶ A ⊗ A ⊗ A → A∗ (see 2.2) via
+ (4.3)  ̃F0,0(s)(e) = 1,  ̃F0,0(e)(s) = 1,  ̃F2,0(s,s;e)(e) = 1.
+ It is again a direct check that ̃F is a homotopy inner product, i.e., it satisfies all equations required by 0 = DF = D1F given by (2.4) (since d, and thus D0, vanishes). Explicitly, these equations are:
+   ̃F0,0(a ⋅ a1)(ã) = ̃F0,0(a)(a1 ⋅ ã),
+   ̃F0,0(a1 ⋅ a)(ã) = ̃F0,0(a)(ã ⋅ a1),
+   ̃F2,0(a1,a2;a ⋅ a3)(ã) = ̃F2,0(a1,a2;a)(a3 ⋅ ã),
+   0 = ̃F2,0(a1,a2;a3 ⋅ a)(ã) + ̃F2,0(a1,a2 ⋅ a3;a)(ã) + ̃F2,0(a1 ⋅ a2,a3;a)(ã) + ̃F2,0(a2,a3;a)(ã ⋅ a1)
+ for all a,ã ∈ A and a1,a2,a3 ∈ A.
+ We next give formulas for calculating homotopy inner products (over Z2) for (triangulated) 2-dimensional spaces on the cochain level. The following observation notes that higher homotopies naturally appear for inner products on the cochain level.
+ Observation 4.4. Let A be a unital dg-algebra over any commutative ring R, so that both A ∶= A and A∗ are dg-modules over A (see 2.1). Now, let x ∈ A∗ be a fixed closed element, d(x) = 0. Define F ∶ A → A∗ for a ∈ A by setting F(a) ∈ A∗, evaluated on some ã ∈ A, to be F(a)(ã) ∶= (x ⌢ a)(ã) = x(a ⋅ ã).
+ Claim: F is a chain map and a graded right module map. F is in general not a graded left module map. A sufficient condition for F being a graded left module map is that A is graded commutative.
+ Proof. First, F is a chain map, since for a,ã ∈ A and dx = 0, we have F(da)(ã) = x(da ⋅ ã) = x(d(a ⋅ ã) − (−1)^{∣a∣} a ⋅ dã) = (−1)^{∣x∣+1} dx(a ⋅ ã) − (−1)^{∣a∣} ⋅ F(a)(dã) = −(−1)^{∣a∣} ⋅ (−1)^{∣F(a)∣+1} ⋅ d(F(a))(ã) = (−1)^{∣F∣} ⋅ d(F(a))(ã). Next, F is a graded right module map, since for a,ã ∈ A, a1 ∈ A, we have (F(a).a1)(ã) = F(a)(a1 ⋅ ã) = x(a ⋅ a1 ⋅ ã) = F(a.a1)(ã). To check when F is a graded left module map, compute F(a1.a)(ã) = x(a1 ⋅ a ⋅ ã) and (a1.F(a))(ã) = (−1)^{∣a1∣⋅(∣F(a)∣+∣ã∣)} ⋅ F(a)(ã ⋅ a1) = (−1)^{∣a1∣⋅(∣F∣+∣a∣+∣ã∣)} ⋅ x(a ⋅ ã ⋅ a1). Thus, F is in general not a graded left module map. Moreover, if a1 ⋅ a ⋅ ã = (−1)^{∣a1∣⋅(∣a∣+∣ã∣)} a ⋅ ã ⋅ a1 for all a,ã,a1, then (a1.F(a))(ã) = (−1)^{∣a1∣⋅∣F∣} ⋅ F(a1.a)(ã), and thus F will be a graded left module map. □
+ In particular, assume that X is a closed, oriented manifold, and R is a commutative ring. In order to calculate the BV algebra on HH●(C●(X;R);(C●(X;R))∗) one might try to use capping a cochain with a fundamental cycle x of X as the appropriate dg-bimodule map F = x ⌢ − ∶ C●(X;R) → C●(X;R) ↪ (C●(X;R))∗. However, the above observation shows that capping on the (co-)chain level is in general not a dg-bimodule map, and thus does not induce a chain map on the Hochschild cochains. One way to resolve this issue is to provide higher homotopies for the left module structure, i.e., to provide a homotopy inner product, which then does give a corresponding chain map on Hochschild cochains. This is what is done in this paper for S2 with Z2 coefficients.
+ product F[0] ∶ A[0] → (A[0])∗ is given by F[0](e0)(e0) = 1.
667
+ We will also vary the superscript in an obvious way, i.e., the dg-algebra A[1] ∶= Z2.e1 has
668
+ the homotopy inner product F[1] ∶ A[1] → (A[1])∗,F[1](e1)(e1) = 1, etc.
669
+ Example 4.6 (The 1-simplex). Let A[01] ∶= Z2.e0 ⊕ Z2.e1 ⊕ Z2.b01, where ∣e0∣ = ∣e1∣ = 0 and
670
+ ∣b01∣ = −1 with differential d(e0) = d(e1) = b01. The product is the usual cup product of
671
+ simplicial cochains, i.e., e0 ⋅ e0 = e0, e1 ⋅ e1 = e1, and e0 ⋅ b01 = b01 ⋅ e1 = b01. Note that the unit
672
+ of A[01] is e0 + e1.
673
+ We want to define an inner product F with lowest component non-vanishing only for
674
+ F(e0)(b01) = F(b01)(e1) = 1. Note that F is not a chain map, but F(d(a))(̃a)+F(a)(d(̃a)) =
675
+ F[0](a)(̃a) + F[1](a)(̃a) for all a,̃a ∈ A[01]. Moreover, F is a graded right module map, i.e.,
676
+ F(a ⋅ a1)(̃a) = F(a)(a1 ⋅ ̃a) for all a,̃a,a1 ∈ A[01], but F is not a graded left module map,
677
+ since F(e0 ⋅ e0)(b01) = 1 ≠ 0 = F(e0)(b01 ⋅ e0).
678
+ There is an inductive procedure (involving choices at each stage) for obtaining higher
679
+ homotopies that provide F with left modules homotopies, making it into a homotopy inner
680
+ product up to F[0] and F[1] from example 4.5 (interpreted as maps A[01] → (A[01])∗) . This
681
+ procedure was described in [TZS, Proposition 3.1.2]. Performing the induction leads to a
682
+ sequence of maps, which was first stated in [TZS, Proposition B.2]:
683
+ ∀k ≥ 0 ∶
684
+ F[01]
685
+ k,0 (b01,... ,b01;e0)(b01) = 1,
686
+ F[01]
687
+ k,0 (b01,... ,b01;b01)(e1) = 1,
688
+ (4.7)
689
+ b01
690
+ b01
691
+ ...
692
+ b01
693
+ e0
694
+ b01
695
+ b01
696
+ b01
697
+ ...
698
+ b01
699
+ b01
700
+ e1
701
+ All other inner products are zero. Note, this resolves the above problem of F[01]
702
+ 0,0 not being a
703
+ graded left module map, since, now, F[01]
704
+ 0,0 is a graded left module map up to the homotopy
705
+ F[01]
706
+ 1,0 ; for example:
707
+ (DF[01])1,0(e0;e0)(b01) = F[01]
708
+ 1,0 (d(e0);e0)(b01) + F[01]
709
+ 1,0 (e0;d(e0))(b01)
710
+ + F[01]
711
+ 0,0 (e0 ⋅ e0)(b01) + F[01]
712
+ 0,0 (e0)(b01 ⋅ e0) = 1 + 0 + 1 + 0 = 0.
713
+ From [TZS, Proposition B.2], which we will also prove in appendix A, we have that:
714
+ (4.8)
715
+ DF[01] = F[0] + F[1]
716
+ Again, we will need to vary the superscript in an obvious way, i.e., for the dg-algebra
717
+ A[12] there are maps F[12] which are given by replacing 0 and 1 in the above with 1 and 2,
718
+ respectively, etc.
719
+ Example 4.9 (The 2-simplex). We now describe inner product maps for the 2-simplex, which extends the previous two examples of the 0- and 1-simplex. Let A[012] ∶= Z2.e0 ⊕ Z2.e1 ⊕ Z2.e2 ⊕ Z2.b01 ⊕ Z2.b02 ⊕ Z2.b12 ⊕ Z2.c012, with ∣ej∣ = 0, ∣bij∣ = −1 and ∣c012∣ = −2, and differential d(e0) = b01 + b02, d(e1) = b01 + b12, d(e2) = b02 + b12 and d(bij) = c012 for all 0 ≤ i < j ≤ 2. The multiplication is non-zero only for
+ (4.10)  ej ⋅ ej = ej (∀j),  ei ⋅ bij = bij, bij ⋅ ej = bij (∀i < j),  e0 ⋅ c012 = c012,  c012 ⋅ e2 = c012,  b01 ⋅ b12 = c012.
+ Now, following the procedure (which uses locality) from [TZS, Proposition 3.1.2], we define maps F[012]k,0 whose only non-zero maps are given by the following equations (4.11)-(4.18):
+ (4.11)  ∀k ≥ 0 ∶  F[012]k,0(b02,...,b02;c012)(e2) = 1,
+ (4.12)  ∀k ≥ 0 ∶  F[012]k,0(b02,...,b02;e0)(c012) = 1,
+ (4.13)  ∀k ≥ 0 ∶  F[012]k,0(b02,...,b02;b01)(b12) = 1,
+ (4.14)  ∀k ≥ 0, ∀1 ≤ ℓ ≤ k + 1 ∶  F[012]k+1,0(b01,...,b01, c012 (in the ℓth slot), b02,...,b02;e0)(b01) = 1,
+ (4.15)  ∀k ≥ 0, ∀1 ≤ ℓ ≤ k + 1 ∶  F[012]k+1,0(b01,...,b01, c012 (in the ℓth slot), b02,...,b02;b01)(e1) = 1,
+ (4.16)  ∀k ≥ 0, ∀1 ≤ ℓ ≤ k + 1 ∶  F[012]k+1,0(b02,...,b02, c012 (in the ℓth slot), b12,...,b12;e1)(b12) = 1,
+ (4.17)  ∀k ≥ 0, ∀1 ≤ ℓ ≤ k + 1 ∶  F[012]k+1,0(b02,...,b02, c012 (in the ℓth slot), b12,...,b12;b12)(e2) = 1,
+ (4.18)  ∀k ≥ 0, ∀1 ≤ ℓ1 < ℓ2 ≤ k + 2 ∶  F[012]k+2,0(b01,...,b01, c012 (in the ℓ1th slot), b02,...,b02, c012 (in the ℓ2th slot), b12,...,b12;e1)(e1) = 1.
+ We claim that the following equation holds, which will be proved in appendix A:
+ (4.19)  DF[012] = F[01] + F[02] + F[12]
+ As before, the above will also be applied to obvious variations of the superscript, such as, e.g., A[123] with maps F[123] given by replacing 0, 1 and 2 in the above with 1, 2 and 3, respectively, etc.
+ Example 4.20 (C●(S2;Z2) inner product with homotopies). We now use a tetrahedral triangulation of the 2-sphere (with vertices e0, e1, e2, e3). More precisely, we set
+   A = ⊕_{0≤j≤3} Z2.ej ⊕ ⊕_{0≤i<j≤3} Z2.bij ⊕ ⊕_{0≤i<j<l≤3} Z2.cijl
+ with ∣ej∣ = 0, ∣bij∣ = −1 and ∣cijl∣ = −2, and differential d(ej) = ∑_{i≠j} b{ij}, d(bij) = ∑_{l≠i,j} c{ijl}, and d(cijl) = 0, where the bracket “{}” denotes indices in ascending order, i.e., b{ij} = bij for i < j and b{ij} = bji for j < i, and similarly for c{ijl}. The multiplication is similar to (4.10), where the indices “012” are replaced by any 0 ≤ i < j < l ≤ 3:
+ (4.21)  ej ⋅ ej = ej (∀j),  ei ⋅ bij = bij, bij ⋅ ej = bij (∀i < j),  ei ⋅ cijl = cijl, cijl ⋅ el = cijl, bij ⋅ bjl = cijl (∀i < j < l),
+ and all other multiplications vanish. Note in particular that A has the unit e0 + e1 + e2 + e3.
+ We define the homotopy inner product F for A ∶= A to be given by (4.11)-(4.18) with “012” in (4.11)-(4.18) replaced by any 0 ≤ i < j < l ≤ 3, i.e.,
+ (4.22)  F ∶= F[012] + F[013] + F[023] + F[123].
+ Claim: F is a homotopy inner product, i.e., DF = 0.
+ Proof. We compute DF = DF[012] + DF[013] + DF[023] + DF[123]. We claim that we can use equation (4.19) to evaluate this expression, which is not completely obvious since D in (4.19) is for the dg-algebra A[012], whereas we need to apply D for A.
+ To see that (4.19) holds for the dg-algebra A, note that, for example, DF[012](a1,...)(ar) applies a differential or a multiplication to the inputs aj ∈ A (as stated in (2.3) and (2.4)). Now, if any of the inputs aj are generators of A which have a 3 in the index (such as, e.g., b13 ∈ A), then taking the differential or a multiplication will consist of sums of generators with 3 in their indices, and thus DF[012] applied to it would vanish. On the other hand, if all inputs aj applied to DF[012] are only given by generators indexed by 0, 1, and 2, then DF[012] applied to it coincides with what we would get for A[012], because the multiplication in A equals the one in A[012], and the differential in A equals the differential in A[012] up to generators with indices of 3. (For example, applying d(b12) = c012 + c123 in DF[012] will vanish on c123 and will coincide with (4.19) on c012 = dA[012](b12), the differential of b12 in A[012].)
+ Thus, DF = DF[012] + DF[013] + DF[023] + DF[123] = (F[01] + F[02] + F[12]) + (F[01] + F[03] + F[13]) + (F[02] + F[03] + F[23]) + (F[12] + F[13] + F[23]) = 0. □
+ Proposition 4.23. Let A be the dg-algebra from example 4.20, and let B = H●(S2;Z2) ≅ Z2.e ⊕ Z2.s be the dg-algebra from examples 4.1 and 4.2. Then the map f ∶ B → A, f(e) ∶= e0 + e1 + e2 + e3 and f(s) ∶= c012, is a dg-algebra quasi-isomorphism.
+ Transferring the homotopy inner product F/A defined in (4.22) via the map f gives f(F)/B = ̃F/B, where ̃F/B is the homotopy inner product defined in equation (4.3) from example 4.2.
+ Proof. The transferred map of F/A from (4.22) is given by applying f to inputs from B and then applying F/A; see (2.10). Since the image of f is spanned by c012 and e0 + e1 + e2 + e3, it follows from (4.11)-(4.18) that the only non-zero components (after applying f) are:
+   F[012]0,0(c012)(e2) = 1,  F[012]0,0(e0)(c012) = 1,  F[012]2,0(c012,c012;e1)(e1) = 1.
+ Thus, we get precisely the non-zero components of ̃F from (4.3) for f(F)/B. □
+ 5. Computations of the BV algebras
+ We now compute the BV algebras on Hochschild cohomology induced by the homotopy inner products considered in examples 4.1 and 4.2 in section 4. In particular, we complete the computations from theorem 1.6 for the 2-sphere by computing HH●(H●,H●) and HH●(H●,H●) for the cohomology H● = H●(S2;Z2) and the homology H● = H●(S2;Z2), and we will check the formulas for the maps F and ̃F in (1.7) stated in theorem 1.6. In this section the ground ring is R = Z2.
+ 5.1. Hochschild cohomology HH●(H●,H●) and the cup product. Denote by H● = H●(S2;Z2) ≅ Z2.e ⊕ Z2.s from example 4.1, with ∣e∣ = 0 and ∣s∣ = −2. The generators of normalized Hochschild cochains CH̄●(H●,H●) are φk, ψk for k ≥ 0, given by
+   φk(s,...,s) ∶= e,  ψk(s,...,s) ∶= s   (k inputs each).
+ Note that ∣φk∣ = k, ∣ψk∣ = k − 2, since the shifted generator s is of degree −1. Applying the Hochschild differential (see 2.1, and using d = 0 and s ⋅ s = 0, s ⋅ e = e ⋅ s = s), we see that all φk and ψk are closed (over Z2). Thus,
+   HH●(H●,H●) ≅ ⊕_{k≥0} Z2.φk ⊕ ⊕_{k≥0} Z2.ψk.
+ The cup product (3.5) is readily checked to be φk ⌣ φℓ = φk+ℓ, ψk ⌣ ψℓ = 0, and φk ⌣ ψℓ = ψℓ ⌣ φk = ψk+ℓ.
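+ These cup product relations can be checked mechanically; the following Python sketch (ours, for illustration) evaluates (3.5) on the generators, using that a generator is determined by its arity and its value on (s,...,s):
+
+ def mul(x, y):                          # product in H●(S2; Z2)
+     if x == "e": return y
+     if y == "e": return x
+     return None                         # s·s = 0
+
+ def cup(phi, rho):                      # (φ ⌣ ρ)(s^{k+l}) = φ(s^k)·ρ(s^l)
+     v = mul(phi[0], rho[0])
+     return None if v is None else (v, phi[1] + rho[1])
+
+ phi = lambda k: ("e", k)                # φk(s,...,s) = e
+ psi = lambda k: ("s", k)                # ψk(s,...,s) = s
+ assert cup(phi(2), phi(3)) == phi(5)    # φk ⌣ φl = φ_{k+l}
+ assert cup(phi(2), psi(3)) == psi(5)    # φk ⌣ ψl = ψ_{k+l}
+ assert cup(psi(2), psi(3)) is None      # ψk ⌣ ψl = 0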
+ 5.2. Hochschild cohomology HH●(H●,H●) and Connes' B-operator. Denote by H● = H●(S2;Z2) ≅ Z2.e∗ ⊕ Z2.s∗ from example 4.1, with ∣e∗∣ = 0 and ∣s∗∣ = 2 and s.e∗ = e∗.s = 0, s.s∗ = s∗.s = e∗. The generators of normalized Hochschild cochains CH̄●(H●,H●) are θk, χk for k ≥ 0, given by
+   θk(s,...,s) ∶= s∗,  χk(s,...,s) ∶= e∗   (k inputs each).
+ Note that ∣θk∣ = k + 2, ∣χk∣ = k. All θk and χk are closed under the Hochschild cochain differential (over Z2), so that
+   HH●(H●,H●) ≅ ⊕_{k≥0} Z2.θk ⊕ ⊕_{k≥0} Z2.χk.
+ Whereas HH●(H●,H●) has the cup product (3.5), HH●(H●,H●) has Connes' B-operator (3.2). We compute B from (3.2) to be (B(ϕ))(s,...,s)(a) ∶= r ⋅ ϕ(s,...,s,a,s,...,s)(e), with r − 1 inputs s on the left-hand side, which applied to θk and χk yields:
+   B(θr) = 0,  B(χr) = r ⋅ θr−1.
+ This confirms the expressions for B stated in theorem 1.6.
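+ The computation of B on generators is simple enough to replay in code. The sketch below (ours, not the paper's) tracks the single coefficient that formula (3.2) produces when all inputs equal s, i.e., (B(ϕ))(s,...,s)(s) = r · ϕ(s,...,s)(e):
+
+ def value(gen):                                  # θr ↦ s*, χr ↦ e*
+     return "s*" if gen == "theta" else "e*"
+
+ def ev(dual, x):                                 # pair a dual vector with x
+     return 1 if dual == x + "*" else 0
+
+ for r in range(8):
+     assert (r * ev(value("theta"), "e")) % 2 == 0       # B(θr) = 0
+     assert (r * ev(value("chi"), "e")) % 2 == r % 2     # B(χr) = r·θ_{r−1}
+ print("B(θr) = 0 and B(χr) = r·θ_{r−1} confirmed mod 2")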
+ 5.3. F induced by example 4.1 and the BV delta ∆F. The dg-bimodule map F ∶ H● → H●, F(s) = e∗ and F(e) = s∗, from example 4.1 induces a map on Hochschild cohomologies F ∶ HH●(H●,H●) → HH●(H●,H●) via (2.6). Since F only has a lowest component F0,0, this is simply
+   F(φk) = F ○ φk = θk,  F(ψk) = F ○ ψk = χk.
+ F is clearly an isomorphism, and this confirms F from theorem 1.6.
+ We also compute ∆F and check that it is equal to B transferred to HH●(H●,H●), i.e., condition (2) from definition 3.6 for a Poincaré duality structure. From (3.3), we get for ϕ ∈ Hom(H●⊗r,H●) that ZF(ϕ)(s,...,s)(a) = r ⋅ F(e)(ϕ(s,...,a,...,s)), which gives
+   ZF(φk) = 0 = B(θk) = B ○ F(φk),  ZF(ψk) = k ⋅ θk−1 = B(χk) = B ○ F(ψk).
+ Therefore, F⁻¹ ○ B ○ F = F⁻¹ ○ ZF = ∆F.
5.4. F̃ induced by example 4.2 and the BV delta ∆_F̃. Consider the dg-bimodule map F̃: H^● → H_● from example 4.2. Since F̃ = F̃_{0,0} + F̃_{2,0}, where F̃_{0,0}(s) = e^*, F̃_{0,0}(e) = s^*, with one extra homotopy F̃_{2,0} which is non-zero only for F̃_{2,0}(s,s;e) := e^* (see (4.3)), we get

F̃(φk) = F̃_{0,0} ∘ φk + F̃_{2,0}(−,−;φk(−)) = θk + χ_{k+2},
F̃(ψk) = F̃_{0,0} ∘ ψk = χk.

This confirms F̃ from theorem 1.6. As was noted in theorem 1.6, F̃ is an isomorphism with inverse F̃^{−1} given by F̃^{−1}(θk) = φk + ψ_{k+2} and F̃^{−1}(χk) = ψk.

We also check that in this case F̃^{−1} ∘ B ∘ F̃ = ∆_F̃. From (3.3), we get for ϕ ∈ Hom(H^{●⊗r},H^●) that

Z_F̃(ϕ)(s,...,s)(a) = r ⋅ F̃_{0,0}(e)(ϕ(s,...,a,...,s)) + r ⋅ F̃_{2,0}(s,s;e)(ϕ(s,...,a,...,s))
                      + F̃_{2,0}(ϕ(s,...,s),s;e)(a) + F̃_{2,0}(s,ϕ(s,...,s);e)(a),

which gives

Z_F̃(φk) = 0 + k ⋅ θ_{k+1} + 0 + 0 = k ⋅ θ_{k+1},
Z_F̃(ψk) = k ⋅ θ_{k−1} + 0 + χ_{k+1} + χ_{k+1} = k ⋅ θ_{k−1}.

Since B ∘ F̃(φk) = B(θk + χ_{k+2}) = (k+2) ⋅ θ_{k+1} = k ⋅ θ_{k+1} (as k+2 = k mod 2) and B ∘ F̃(ψk) = B(χk) = k ⋅ θ_{k−1}, this shows that Z_F̃ = B ∘ F̃ and, thus, F̃^{−1} ∘ B ∘ F̃ = F̃^{−1} ∘ Z_F̃ = ∆_F̃. Explicitly, ∆_F̃ is given by:

∆_F̃(φk) = F̃^{−1} ∘ Z_F̃(φk) = F̃^{−1}(k ⋅ θ_{k+1}) = k ⋅ (φ_{k+1} + ψ_{k+3}),
∆_F̃(ψk) = F̃^{−1} ∘ Z_F̃(ψk) = F̃^{−1}(k ⋅ θ_{k−1}) = k ⋅ (φ_{k−1} + ψ_{k+1}).

This confirms ∆̃ from (1.9) and thus theorem 1.6.
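One can double-check the formula for ∆_F̃ and the BV relation ∆_F̃² = 0 by implementing the maps F̃, F̃^{−1}, and B on the two bases over Z2. The sketch below is our own encoding (not from the paper); elements are dictionaries mapping basis labels to coefficients mod 2.

```python
# Z_2-linear maps on the bases {phi_k, psi_k} and {theta_k, chi_k}.
def add(u, v):
    w = dict(u)
    for key, c in v.items():
        w[key] = (w.get(key, 0) + c) % 2
        if w[key] == 0:
            del w[key]
    return w

def scale(c, u):
    return {key: (c * val) % 2 for key, val in u.items() if (c * val) % 2}

def apply_map(rules, u):
    out = {}
    for (name, k), c in u.items():
        out = add(out, scale(c, rules(name, k)))
    return out

# F~(phi_k) = theta_k + chi_{k+2},  F~(psi_k) = chi_k          (this section)
Ft = lambda name, k: ({('theta', k): 1, ('chi', k + 2): 1} if name == 'phi'
                      else {('chi', k): 1})
# F~^{-1}(theta_k) = phi_k + psi_{k+2},  F~^{-1}(chi_k) = psi_k
Fti = lambda name, k: ({('phi', k): 1, ('psi', k + 2): 1} if name == 'theta'
                       else {('psi', k): 1})
# B(theta_k) = 0,  B(chi_k) = k * theta_{k-1}                  (section 5.2)
B = lambda name, k: (scale(k, {('theta', k - 1): 1}) if name == 'chi' else {})

def Delta(u):                       # Delta~ = F~^{-1} o B o F~
    return apply_map(Fti, apply_map(B, apply_map(Ft, u)))

for k in range(8):
    assert Delta({('phi', k): 1}) == scale(k, {('phi', k + 1): 1, ('psi', k + 3): 1})
    assert Delta({('psi', k): 1}) == scale(k, {('phi', k - 1): 1, ('psi', k + 1): 1})
    assert Delta(Delta({('phi', k): 1})) == {}   # Delta~ squares to zero
    assert Delta(Delta({('psi', k): 1})) == {}
```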
5.5. Example of a homotopy inner product that is not a Poincaré duality structure. We end this section with an example of a homotopy inner product which is not a Poincaré duality structure due to the failure of condition (2) from definition 3.6 (while condition (1) holds). We therefore do not get a BV algebra on the Hochschild cohomology HH^●(A,A) of the underlying algebra A, since there is no ∆ operator, but only an operator, F^{−1} ∘ B ∘ F, that squares to zero, and a different operator, ∆_F, whose deviation from being a derivation is a Gerstenhaber bracket; cf. lemma 3.4.

Let A = Z2.e ⊕ Z2.b ⊕ Z2.c with zero differential, |e| = 0, |b| = −1, |c| = −2, where e is the unit, b ⋅ b = c, and all other products vanish. We denote by A^* = Z2.e^* ⊕ Z2.b^* ⊕ Z2.c^* its dual, where e^*, b^*, c^* are the duals of e, b, c, respectively, so that |e^*| = 0, |b^*| = 1, |c^*| = 2. Define the homotopy inner product F whose only non-vanishing components are F_{0,0} and F_{3,0}, given by

F_{0,0}(c)(e) = F_{0,0}(b)(b) = F_{0,0}(e)(c) = 1,
F_{3,0}(c,b,c;e)(e) = F_{3,0}(b,b,b;c)(e) = F_{3,0}(b,b,b;e)(c) = F_{3,0}(b,b,b;b)(b) = 1.

One can check explicitly that the conditions for a homotopy inner product, 0 = DF = D_1F, where D_1 is given by (2.4), are satisfied.

Note that F induces an isomorphism on Hochschild cochains F: CH^●(A,A) → CH^●(A,A^*), since F_{0,0}: A → A^* is an isomorphism, and thus F satisfies condition (1) of 3.6. We next show that condition (2) of 3.6 fails, i.e., that B ∘ F ≠ Z_F. Consider ϕ ∈ Hom(A^{⊗0},A), ϕ(1) = e. Note that ϕ is a closed element of the Hochschild cochains CH^●(A,A). Plugging ϕ into (3.3) gives Z_F(ϕ) = 0, since each individual summand of (3.3) vanishes. Next, to compute F(ϕ), we see that the only non-zero evaluations of F(ϕ) are (the ones with "e" in the input spot for the module of F):

F(ϕ)(1) = F_{0,0}(ϕ(1)) = c^*,
F(ϕ)(c,b,c) = F_{3,0}(c,b,c;ϕ(1)) = e^*,
F(ϕ)(b,b,b) = F_{3,0}(b,b,b;ϕ(1)) = c^*.

Applying B from (3.2) (note that the unit of A is written as e in this example) is only non-zero on the middle term, for which we get the only non-zero evaluations:

B(F(ϕ))(c,b)(c) = 1,    B(F(ϕ))(b,c)(c) = 1,    B(F(ϕ))(c,c)(b) = 1.

Thus B(F(ϕ)) is non-vanishing, and it is closed in CH^●(A,A^*), which follows either because ϕ is closed and B ∘ F is a chain map, or by direct inspection.

Claim: B(F(ϕ)) is not exact in CH^●(A,A^*).

The claim implies that B ∘ F(ϕ) ≠ 0 = Z_F(ϕ) in HH^●(A,A^*), and so F is not a Poincaré duality structure, as it fails condition (2) from definition 3.6.

Proof of the claim: Note that d = 0 in A implies that the Hochschild differential D = D_0 + D_1 = D_1 from 2.1 must increase the number of tensor factors of A in the inputs. Thus any ϕ̃ which would make B(F(ϕ)) exact, i.e., D(ϕ̃) = B(F(ϕ)), must have non-vanishing components with exactly one tensor input. Since the degree |B(F(ϕ))| = 3, it must be that |ϕ̃| = 4. However, all Hochschild cochains in CH^●(A,A^*) with one or no input are of degree ≤ 3. Thus, B(F(ϕ)) cannot be exact. □
Appendix A. Proof of equations (4.8) and (4.19)

In this appendix we prove the identity (4.8), i.e., DF[01] = F[0] + F[1], for the map F[01] given by (4.7) from example 4.6, as well as the identity (4.19), i.e., DF[012] = F[01] + F[02] + F[12], for the map F[012] given by (4.11)-(4.18) from example 4.9. The ground ring in this appendix is R = Z2.
A.1. Preliminaries. Any F_{p,q} is a map F_{p,q}: A^{⊗p} ⊗ A ⊗ A^{⊗q} → A^*, which is an element F_{p,q} ∈ (A^*)^{⊗p} ⊗ A^* ⊗ (A^*)^{⊗q} ⊗ A^*. Consider A[01] with generators e0, e1, b01, respectively A[012] with generators e0, e1, e2, b01, b12, b02, c012. For a multi-index I ∈ {0, 1, 2, 01, 02, 12, 012}, define a^*_I by a^*_0 := e^*_0, a^*_1 := e^*_1, a^*_2 := e^*_2, a^*_01 := b^*_01, a^*_02 := b^*_02, a^*_12 := b^*_12, a^*_012 := c^*_012. With this we use the following notation, for a given sequence of multi-indices I_1, ..., I_{p+q+2}:

(A.2)  [ I_1,...,I_p | I_{p+1} | I_{p+2},...,I_{p+q+1} | I_{p+q+2} ]
       := a^*_{I_1} ⊗ ... ⊗ a^*_{I_p} ⊗ a^*_{I_{p+1}} ⊗ a^*_{I_{p+2}} ⊗ ... ⊗ a^*_{I_{p+q+1}} ⊗ a^*_{I_{p+q+2}}
       ∈ (A^*)^{⊗p} ⊗ A^* ⊗ (A^*)^{⊗q} ⊗ A^*.

Moreover, placing a tilde under a multi-index (rendered I~ below) sums over any number of copies of that index; i.e., for tildes at positions 1 ≤ k_1 < k_2 < ... < k_r ≤ p, we define

(A.3)  [ I_1,...,I_{k_1}~,...,I_{k_2}~,...,I_p | I_{p+1} | I_{p+2},...,I_{p+q+1} | I_{p+q+2} ]
       := Σ_{n_1,...,n_r ≥ 0} [ I_1,..., I_{k_1},...,I_{k_1} (n_1 many), ..., I_{k_2},...,I_{k_2} (n_2 many), ..., I_p | I_{p+1} | I_{p+2},...,I_{p+q+1} | I_{p+q+2} ].

Since our examples are chosen to be "strict" on the right (see observation 4.4), we only need higher homotopies (and thus tildes) on the first p tensor factors, but not on the other q tensor factors of A^*.

We apply D = D_0 + D_1 from (2.3) and (2.4) to the above. Here, D_0 applies the differential of A^*, which, in the multi-index notation, removes one index (for example, 01 ↦ 0 + 1, 012 ↦ 01 + 02 + 12, etc.). D_1 applies a multiplication, which on A^* applies the Alexander-Whitney coproduct (for example, 01 ↦ 0 ⊗ 01 + 01 ⊗ 1, 012 ↦ 0 ⊗ 012 + 01 ⊗ 12 + 012 ⊗ 2, etc.); see also the calculation below.
A.4. Proof of (4.8). F[01] from (4.7) is given by F[01] = [ 01~ | 0 | | 01 ] + [ 01~ | 01 | | 1 ]. We calculate D = D_0 + D_1 applied to F[01]; the label in parentheses at the right of each term indicates which terms can be combined.

D_0([ 01~ | 0 | | 01 ]) = [ 01~,0,01~ | 0 | | 01 ] (01) + [ 01~,1,01~ | 0 | | 01 ] (02)
  + [ 01~ | 0 | | 0 ] (03) + [ 01~ | 0 | | 1 ] (04)

D_1([ 01~ | 0 | | 01 ]) = [ 01~,0,01,01~ | 0 | | 01 ] (01) + [ 01~,01,1,01~ | 0 | | 01 ] (02)
  + [ 01~,0 | 0 | | 01 ] (01) + [ 01~ | 0 | 0 | 01 ] (05) + [ 01~ | 0 | 0 | 01 ] (05)
  + [ 01~ | 0 | 01 | 1 ] (06) + [ 01,01~ | 0 | | 0 ] (03) + [ 1,01~ | 0 | | 01 ] (02)

D_0([ 01~ | 01 | | 1 ]) = [ 01~,0,01~ | 01 | | 1 ] (07) + [ 01~,1,01~ | 01 | | 1 ] (08)
  + [ 01~ | 0 | | 1 ] (04) + [ 01~ | 1 | | 1 ] (09)

D_1([ 01~ | 01 | | 1 ]) = [ 01~,0,01,01~ | 01 | | 1 ] (07) + [ 01~,01,1,01~ | 01 | | 1 ] (08)
  + [ 01~,0 | 01 | | 1 ] (07) + [ 01~,01 | 1 | | 1 ] (09) + [ 01~ | 0 | 01 | 1 ] (06)
  + [ 01~ | 01 | 1 | 1 ] (10) + [ 01~ | 01 | 1 | 1 ] (10) + [ 1,01~ | 01 | | 1 ] (08)

Note that for (01), (02), (07), and (08), there are always 3 terms that can be combined and cancel. In fact, all terms cancel, except that the terms labeled (03) leave over [ | 0 | | 0 ], and the terms labeled (09) leave over [ | 1 | | 1 ]. These two left-over terms are F[0] and F[1], which shows that DF[01] = F[0] + F[1], i.e., equation (4.8).
A.5. Proof of (4.19). F[012] from (4.11)-(4.18) is given by

F[012] = [ 02~ | 012 | | 2 ] + [ 02~ | 0 | | 012 ] + [ 02~ | 01 | | 12 ]
       + [ 01~,012,02~ | 0 | | 01 ] + [ 01~,012,02~ | 01 | | 1 ]
       + [ 02~,012,12~ | 1 | | 12 ] + [ 02~,012,12~ | 12 | | 2 ]
       + [ 01~,012,02~,012,12~ | 1 | | 1 ].

We calculate D = D_0 + D_1 applied to F[012]. Again, the label in parentheses indicates which terms combine and cancel; left-over terms are collected below.

D_0([ 02~ | 012 | | 2 ]) = [ 02~,0,02~ | 012 | | 2 ] (01) + [ 02~,2,02~ | 012 | | 2 ] (02)
  + [ 02~ | 01 | | 2 ] (03) + [ 02~ | 02 | | 2 ] (04) + [ 02~ | 12 | | 2 ] (05)

D_1([ 02~ | 012 | | 2 ]) = [ 02~,0,02,02~ | 012 | | 2 ] (01) + [ 02~,02,2,02~ | 012 | | 2 ] (02)
  + [ 02~,0 | 012 | | 2 ] (01) + [ 02~,01 | 12 | | 2 ] (06) + [ 02~,012 | 2 | | 2 ] (07)
  + [ 02~ | 012 | 2 | 2 ] (08) + [ 02~ | 01 | 12 | 2 ] (09) + [ 02~ | 0 | 012 | 2 ] (10)
  + [ 02~ | 012 | 2 | 2 ] (08) + [ 2,02~ | 012 | | 2 ] (02)

D_0([ 02~ | 0 | | 012 ]) = [ 02~,0,02~ | 0 | | 012 ] (11) + [ 02~,2,02~ | 0 | | 012 ] (12)
  + [ 02~ | 0 | | 01 ] (13) + [ 02~ | 0 | | 02 ] (14) + [ 02~ | 0 | | 12 ] (15)

D_1([ 02~ | 0 | | 012 ]) = [ 02~,0,02,02~ | 0 | | 012 ] (11) + [ 02~,02,2,02~ | 0 | | 012 ] (12)
  + [ 02~,0 | 0 | | 012 ] (11) + [ 02~ | 0 | 0 | 012 ] (16) + [ 02~ | 0 | 0 | 012 ] (16)
  + [ 02~ | 0 | 01 | 12 ] (17) + [ 02~ | 0 | 012 | 2 ] (10) + [ 2,02~ | 0 | | 012 ] (12)
  + [ 12,02~ | 0 | | 01 ] (18) + [ 012,02~ | 0 | | 0 ] (19)

D_0([ 02~ | 01 | | 12 ]) = [ 02~,0,02~ | 01 | | 12 ] (20) + [ 02~,2,02~ | 01 | | 12 ] (21)
  + [ 02~ | 0 | | 12 ] (15) + [ 02~ | 1 | | 12 ] (22) + [ 02~ | 01 | | 1 ] (23) + [ 02~ | 01 | | 2 ] (03)

D_1([ 02~ | 01 | | 12 ]) = [ 02~,0,02,02~ | 01 | | 12 ] (20) + [ 02~,02,2,02~ | 01 | | 12 ] (21)
  + [ 02~,0 | 01 | | 12 ] (20) + [ 02~,01 | 1 | | 12 ] (24) + [ 02~ | 01 | 1 | 12 ] (25)
  + [ 02~ | 0 | 01 | 12 ] (17) + [ 02~ | 01 | 1 | 12 ] (25) + [ 02~ | 01 | 12 | 2 ] (09)
  + [ 2,02~ | 01 | | 12 ] (21) + [ 12,02~ | 01 | | 1 ] (26)

D_0([ 01~,012,02~ | 0 | | 01 ]) = [ 01~,0,01~,012,02~ | 0 | | 01 ] (27) + [ 01~,1,01~,012,02~ | 0 | | 01 ] (28)
  + [ 01~,01,02~ | 0 | | 01 ] (13) + [ 01~,02,02~ | 0 | | 01 ] (13) + [ 01~,12,02~ | 0 | | 01 ] (18)
  + [ 01~,012,02~,0,02~ | 0 | | 01 ] (29) + [ 01~,012,02~,2,02~ | 0 | | 01 ] (30)
  + [ 01~,012,02~ | 0 | | 0 ] (19) + [ 01~,012,02~ | 0 | | 1 ] (31)

D_1([ 01~,012,02~ | 0 | | 01 ]) = [ 01~,0,01,01~,012,02~ | 0 | | 01 ] (27) + [ 01~,01,1,01~,012,02~ | 0 | | 01 ] (28)
  + [ 01~,0,012,02~ | 0 | | 01 ] (27) + [ 01~,01,12,02~ | 0 | | 01 ] (18) + [ 01~,012,2,02~ | 0 | | 01 ] (30)
  + [ 01~,012,02~,0,02,02~ | 0 | | 01 ] (29) + [ 01~,012,02~,02,2,02~ | 0 | | 01 ] (30)
  + [ 01~,012,02~,0 | 0 | | 01 ] (29) + [ 01~,012,02~ | 0 | 0 | 01 ] (32) + [ 01~,012,02~ | 0 | 0 | 01 ] (32)
  + [ 01~,012,02~ | 0 | 01 | 1 ] (33) + [ 1,01~,012,02~ | 0 | | 01 ] (28) + [ 01,01~,012,02~ | 0 | | 0 ] (19)

D_0([ 01~,012,02~ | 01 | | 1 ]) = [ 01~,0,01~,012,02~ | 01 | | 1 ] (34) + [ 01~,1,01~,012,02~ | 01 | | 1 ] (35)
  + [ 01~,01,02~ | 01 | | 1 ] (23) + [ 01~,02,02~ | 01 | | 1 ] (23) + [ 01~,12,02~ | 01 | | 1 ] (26)
  + [ 01~,012,02~,0,02~ | 01 | | 1 ] (36) + [ 01~,012,02~,2,02~ | 01 | | 1 ] (37)
  + [ 01~,012,02~ | 0 | | 1 ] (31) + [ 01~,012,02~ | 1 | | 1 ] (38)

D_1([ 01~,012,02~ | 01 | | 1 ]) = [ 01~,0,01,01~,012,02~ | 01 | | 1 ] (34) + [ 01~,01,1,01~,012,02~ | 01 | | 1 ] (35)
  + [ 01~,0,012,02~ | 01 | | 1 ] (34) + [ 01~,01,12,02~ | 01 | | 1 ] (26) + [ 01~,012,2,02~ | 01 | | 1 ] (37)
  + [ 01~,012,02~,0,02,02~ | 01 | | 1 ] (36) + [ 01~,012,02~,02,2,02~ | 01 | | 1 ] (37)
  + [ 01~,012,02~,0 | 01 | | 1 ] (36) + [ 01~,012,02~,01 | 1 | | 1 ] (39) + [ 01~,012,02~ | 01 | 1 | 1 ] (40)
  + [ 01~,012,02~ | 0 | 01 | 1 ] (33) + [ 01~,012,02~ | 01 | 1 | 1 ] (40) + [ 1,01~,012,02~ | 01 | | 1 ] (35)

D_0([ 02~,012,12~ | 1 | | 12 ]) = [ 02~,0,02~,012,12~ | 1 | | 12 ] (41) + [ 02~,2,02~,012,12~ | 1 | | 12 ] (42)
  + [ 02~,01,12~ | 1 | | 12 ] (24) + [ 02~,02,12~ | 1 | | 12 ] (22) + [ 02~,12,12~ | 1 | | 12 ] (22)
  + [ 02~,012,12~,1,12~ | 1 | | 12 ] (43) + [ 02~,012,12~,2,12~ | 1 | | 12 ] (44)
  + [ 02~,012,12~ | 1 | | 1 ] (38) + [ 02~,012,12~ | 1 | | 2 ] (45)

D_1([ 02~,012,12~ | 1 | | 12 ]) = [ 02~,0,02,02~,012,12~ | 1 | | 12 ] (41) + [ 02~,02,2,02~,012,12~ | 1 | | 12 ] (42)
  + [ 02~,0,012,12~ | 1 | | 12 ] (41) + [ 02~,01,12,12~ | 1 | | 12 ] (24) + [ 02~,012,2,12~ | 1 | | 12 ] (44)
  + [ 02~,012,12~,1,12,12~ | 1 | | 12 ] (43) + [ 02~,012,12~,12,2,12~ | 1 | | 12 ] (44)
  + [ 02~,012,12~,1 | 1 | | 12 ] (43) + [ 02~,012,12~ | 1 | 1 | 12 ] (46) + [ 02~,012,12~ | 1 | 1 | 12 ] (46)
  + [ 02~,012,12~ | 1 | 12 | 2 ] (47) + [ 2,02~,012,12~ | 1 | | 12 ] (42) + [ 12,02~,012,12~ | 1 | | 1 ] (48)

D_0([ 02~,012,12~ | 12 | | 2 ]) = [ 02~,0,02~,012,12~ | 12 | | 2 ] (49) + [ 02~,2,02~,012,12~ | 12 | | 2 ] (50)
  + [ 02~,01,12~ | 12 | | 2 ] (06) + [ 02~,02,12~ | 12 | | 2 ] (05) + [ 02~,12,12~ | 12 | | 2 ] (05)
  + [ 02~,012,12~,1,12~ | 12 | | 2 ] (51) + [ 02~,012,12~,2,12~ | 12 | | 2 ] (52)
  + [ 02~,012,12~ | 1 | | 2 ] (45) + [ 02~,012,12~ | 2 | | 2 ] (07)

D_1([ 02~,012,12~ | 12 | | 2 ]) = [ 02~,0,02,02~,012,12~ | 12 | | 2 ] (49) + [ 02~,02,2,02~,012,12~ | 12 | | 2 ] (50)
  + [ 02~,0,012,12~ | 12 | | 2 ] (49) + [ 02~,01,12,12~ | 12 | | 2 ] (06) + [ 02~,012,2,12~ | 12 | | 2 ] (52)
  + [ 02~,012,12~,1,12,12~ | 12 | | 2 ] (51) + [ 02~,012,12~,12,2,12~ | 12 | | 2 ] (52)
  + [ 02~,012,12~,1 | 12 | | 2 ] (51) + [ 02~,012,12~,12 | 2 | | 2 ] (07) + [ 02~,012,12~ | 12 | 2 | 2 ] (53)
  + [ 02~,012,12~ | 1 | 12 | 2 ] (47) + [ 02~,012,12~ | 12 | 2 | 2 ] (53) + [ 2,02~,012,12~ | 12 | | 2 ] (50)

D_0([ 01~,012,02~,012,12~ | 1 | | 1 ]) = [ 01~,0,01~,012,02~,012,12~ | 1 | | 1 ] (54)
  + [ 01~,1,01~,012,02~,012,12~ | 1 | | 1 ] (55) + [ 01~,01,02~,012,12~ | 1 | | 1 ] (38)
  + [ 01~,02,02~,012,12~ | 1 | | 1 ] (38) + [ 01~,12,02~,012,12~ | 1 | | 1 ] (48)
  + [ 01~,012,02~,0,02~,012,12~ | 1 | | 1 ] (56) + [ 01~,012,02~,2,02~,012,12~ | 1 | | 1 ] (57)
  + [ 01~,012,02~,01,12~ | 1 | | 1 ] (39) + [ 01~,012,02~,02,12~ | 1 | | 1 ] (38)
  + [ 01~,012,02~,12,12~ | 1 | | 1 ] (38) + [ 01~,012,02~,012,12~,1,12~ | 1 | | 1 ] (58)
  + [ 01~,012,02~,012,12~,2,12~ | 1 | | 1 ] (59)

D_1([ 01~,012,02~,012,12~ | 1 | | 1 ]) = [ 01~,0,01,01~,012,02~,012,12~ | 1 | | 1 ] (54)
  + [ 01~,01,1,01~,012,02~,012,12~ | 1 | | 1 ] (55) + [ 01~,0,012,02~,012,12~ | 1 | | 1 ] (54)
  + [ 01~,01,12,02~,012,12~ | 1 | | 1 ] (48) + [ 01~,012,2,02~,012,12~ | 1 | | 1 ] (57)
  + [ 01~,012,02~,0,02,02~,012,12~ | 1 | | 1 ] (56) + [ 01~,012,02~,02,2,02~,012,12~ | 1 | | 1 ] (57)
  + [ 01~,012,02~,0,012,12~ | 1 | | 1 ] (56) + [ 01~,012,02~,01,12,12~ | 1 | | 1 ] (39)
  + [ 01~,012,02~,012,2,12~ | 1 | | 1 ] (59) + [ 01~,012,02~,012,12~,1,12,12~ | 1 | | 1 ] (58)
  + [ 01~,012,02~,012,12~,12,2,12~ | 1 | | 1 ] (59) + [ 01~,012,02~,012,12~,1 | 1 | | 1 ] (58)
  + [ 01~,012,02~,012,12~ | 1 | 1 | 1 ] (60) + [ 01~,012,02~,012,12~ | 1 | 1 | 1 ] (60)
  + [ 1,01~,012,02~,012,12~ | 1 | | 1 ] (55)

In the above sum, several terms appear twice and therefore cancel (over Z2). The terms labeled with the following numbers appear twice and cancel: (03), (08), (09), (10), (15), (16), (17), (25), (31), (32), (33), (40), (45), (46), (47), (53), (60). Moreover, in some instances the sum of three terms cancels; the following labels appear three times and also cancel: (01), (02), (06), (07), (11), (12), (18), (19), (20), (21), (24), (26), (27), (28), (29), (30), (34), (35), (36), (37), (39), (41), (42), (43), (44), (48), (49), (50), (51), (52), (54), (55), (56), (57), (58), (59).

There are six terms labeled (38), whose sum vanishes:

(38) = ([ 01~,012,02~ | 1 | | 1 ] + [ 01~,012,02~,02,12~ | 1 | | 1 ] + [ 01~,012,02~,12,12~ | 1 | | 1 ])
     + ([ 02~,012,12~ | 1 | | 1 ] + [ 01~,01,02~,012,12~ | 1 | | 1 ] + [ 01~,02,02~,012,12~ | 1 | | 1 ])
     = [ 01~,012,12~ | 1 | | 1 ] + [ 01~,012,12~ | 1 | | 1 ] = 0.

The left-over terms that do not cancel are:

(04) = [ 02~ | 02 | | 2 ]
(05) = [ 02~ | 12 | | 2 ] + [ 02~,02,12~ | 12 | | 2 ] + [ 02~,12,12~ | 12 | | 2 ] = [ 12~ | 12 | | 2 ]
(13) = [ 02~ | 0 | | 01 ] + [ 01~,01,02~ | 0 | | 01 ] + [ 01~,02,02~ | 0 | | 01 ] = [ 01~ | 0 | | 01 ]
(14) = [ 02~ | 0 | | 02 ]
(22) = [ 02~ | 1 | | 12 ] + [ 02~,02,12~ | 1 | | 12 ] + [ 02~,12,12~ | 1 | | 12 ] = [ 12~ | 1 | | 12 ]
(23) = [ 02~ | 01 | | 1 ] + [ 01~,01,02~ | 01 | | 1 ] + [ 01~,02,02~ | 01 | | 1 ] = [ 01~ | 01 | | 1 ]

These terms are precisely F[02], F[12], and F[01], so that DF[012] = D_0F[012] + D_1F[012] = F[01] + F[02] + F[12], which is (4.19).
Kate Poirier, Department of Mathematics, New York City College of Technology, City University of New York, 300 Jay Street, Brooklyn, NY 11201
Email address: [email protected]

Thomas Tradler, Department of Mathematics, New York City College of Technology, City University of New York, 300 Jay Street, Brooklyn, NY 11201
Email address: [email protected]
UNE5T4oBgHgl3EQfAw5l/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf ADDED
Binary file (73 kB). View file
 
UtAzT4oBgHgl3EQf0_7l/content/tmp_files/2301.01794v1.pdf.txt ADDED
@@ -0,0 +1,1452 @@
MATHEMATICA MONTISNIGRI

SEVERAL CLASSICAL IDENTITIES VIA MELLIN'S TRANSFORM

KHRISTO N. BOYADZHIEV
Department of Mathematics, Ohio Northern University, Ada, Ohio, 45810, USA
E-mail: k-boyadzhiev@onu.edu
DOI:

2010 Mathematics Subject Classification: 30B10, 11M05, 11M41, 33B15.
Key words and Phrases: Riemann's zeta function, Lerch transcendent, polylogarithm, digamma function, Euler's constant.

Summary. We present a summation rule using Mellin's transform to give short proofs of some important classical relations between special functions and Bernoulli and Euler polynomials. For example, the values of the Hurwitz zeta function at the negative integers are expressed in terms of Bernoulli polynomials. We also show identities involving exponential and Hermite polynomials.
1. INTRODUCTION

Throughout we use the notation (a) = {a + it : t ∈ R} for the vertical line with abscissa 0 < a < 1, oriented from minus to plus infinity. First we recall the formulas for the Mellin transform

G(s) = ∫_0^∞ x^{s-1} g(x) dx

and its inverse

g(x) = (1/(2πi)) ∫_{(a)} x^{-s} G(s) ds,    x > 0.    (1)

Definition. For n > 0 consider integrals of the form

∫_{L(n)} x^{-s} G(s) ds,    x > 0,

where L(n) consists of the line segment [-ni, ni] together with the semicircle R(n) in the left half plane for which the line segment is the diameter. If the integrals

∫_{R(n)} x^{-s} G(s) ds

approach zero when n → ∞, we say that the line of integration in (1) can be closed to the left. In a similar manner we define integrals where the line of integration can be closed to the right.

Proposition. Suppose that the function G(s) is meromorphic on the half plane Re(s) < a + ε for some small ε > 0, and has only simple poles at s = 0, -1, -2, ..., with residues c_0, c_1, c_2, .... If the line of integration in (1) can be closed to the left, the residue theorem provides the representation

g(x) = Σ_{n=0}^∞ c_n x^n    (2)

for the function g(x) from (1), i.e., this function is a power series. If now f(s) is an appropriate holomorphic function on Re(s) < a + ε without poles, we can write

(1/(2πi)) ∫_{(a)} x^{-s} f(s) G(s) ds = Σ_{n=0}^∞ c_n f(-n) x^n,    (3)

when the power series on the right side converges.

Formulas of this type are used for summation of series or interpolation, and are present in many publications (see [2, 4, 9, 10] and the references there). In this note we focus on a special area of applications of the proposition: obtaining some classical identities by using Mellin inversion and comparing coefficients. In order to keep the paper short we omit details and do not discuss the convergence of some integrals and series. The validity of such formulas is considered in [4, 9, 10]. The method is illustrated in the following examples.
2. EXAMPLES

Recall that the residues of the gamma function at zero and at the negative integers are given by Res(Γ, -n) = (-1)^n/n! for n = 0, 1, 2, .... Also, Γ(s) has rapid decay on vertical lines: we have the estimate [15, (20)]

|Γ(a + it)| ~ √(2π) |t|^{a - 1/2} e^{-π|t|/2},    a ∈ R,

when |t| → ∞. The estimate helps with the convergence of our integrals.

The above facts will be used in the following examples. We note that in these examples the lines of integration can be closed to the left (the proofs are standard, using the growth estimate for Γ(s)). Note also that when we replace G(s) by Γ(s) in (3) we have

(1/(2πi)) ∫_{(a)} x^{-s} f(s) Γ(s) ds = Σ_{n=0}^∞ ((-1)^n/n!) f(-n) x^n.    (4)
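As a quick numeric sanity check of (4) (our own illustration, not from the paper), take f(s) = 2^{-s}, for which the right side sums to e^{-2x}. The truncation of the contour at |t| = 40 and the helper names are our choices; the sketch assumes mpmath is available.

```python
import mpmath as mp

# Check (4) for f(s) = 2^(-s): the right-hand side is sum (-2x)^n/n! = exp(-2x).
a, x = mp.mpf('0.5'), mp.mpf('1.3')
f = lambda s: 2**(-s)

# Left side: (1/(2*pi*i)) * integral over the line s = a + i*t; Gamma decays
# like exp(-pi*|t|/2), so truncating at |t| = 40 loses essentially nothing.
lhs = mp.quad(lambda t: x**(-(a + 1j*t)) * f(a + 1j*t) * mp.gamma(a + 1j*t),
              [-40, 40]) / (2*mp.pi)
rhs = mp.nsum(lambda n: (-1)**n / mp.factorial(n) * f(-n) * x**n, [0, mp.inf])

assert mp.almosteq(lhs.real, rhs, rel_eps=mp.mpf('1e-10'))
assert mp.almosteq(rhs, mp.exp(-2*x))
```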
Example 1. Ramanujan's Master Theorem. As Hardy writes in [8], Ramanujan was very fond of his integral formula [8, p. 186]

∫_0^∞ x^{s-1} { Σ_{n=0}^∞ f(n)(-x)^n/n! } dx = Γ(s) f(-s)    (5)

and used it for many applications. Berndt rightly calls it Ramanujan's Master Theorem [2, Entry 11, p. 105]. Details, comments, and applications of (5) are given in these two books and also in [1, 4, 6]. Clearly, after replacing f(s) by f(-s), equation (5) turns into (4) after Mellin inversion. Ramanujan did not use the residue theorem for his proof but only standard calculus (see [2, p. 106]).
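A similar spot-check of (5) is possible (again our own sketch with hypothetical helper names, assuming mpmath): for f(n) = 1/(n+1) the series in braces has the closed form (1 - e^{-x})/x, which avoids the cancellation problems of summing the alternating series directly.

```python
import mpmath as mp

# Ramanujan's Master Theorem (5) with f(n) = 1/(n+1):
# sum_{n>=0} f(n)(-x)^n/n! = (1 - exp(-x))/x   (closed form)
f = lambda s: 1/(s + 1)
g = lambda x: (1 - mp.exp(-x))/x

s = mp.mpf('0.6')                      # any 0 < Re(s) < 1 works for this f
lhs = mp.quad(lambda x: x**(s - 1)*g(x), [0, mp.inf])
rhs = mp.gamma(s)*f(-s)                # Gamma(s)*f(-s), the right side of (5)

assert mp.almosteq(lhs, rhs, rel_eps=mp.mpf('1e-10'))
```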
Example 2. The Hurwitz zeta function is defined by

ζ(s, z) = Σ_{n=0}^∞ 1/(n + z)^s    (Re(z) > 0, Re(s) > 1)

with the integral representation

ζ(s, z) Γ(s) = ∫_0^∞ (e^{(1-z)t}/(e^t - 1)) t^{s-1} dt    (Re(s) > 1).

When z = 1, ζ(s, 1) = ζ(s) is the Riemann zeta function [5]. We also have the modified integral representation (the argument is the same as on pp. 61-62 in [13])

ζ(s, z) Γ(s) = ∫_0^∞ (e^{(1-z)t}/(e^t - 1) - 1/t) t^{s-1} dt    (0 < Re(s) < 1),

which is a Mellin transform formula. By Mellin inversion,

e^{(1-z)t}/(e^t - 1) - 1/t = (1/(2πi)) ∫_{(a)} t^{-s} ζ(s, z) Γ(s) ds = Σ_{n=0}^∞ ((-1)^n/n!) ζ(-n, z) t^n.

At the same time, the Bernoulli polynomials B_n(z) have the generating function

x e^{xz}/(e^x - 1) = Σ_{k=0}^∞ B_k(z) x^k/k!,

and from this

e^{(1-z)x}/(e^x - 1) - 1/x = Σ_{k=1}^∞ B_k(1-z) x^{k-1}/k! = Σ_{n=0}^∞ (B_{n+1}(1-z)/(n+1)!) x^n.

Therefore, by comparing coefficients for n = 0, 1, ... we find the classical formula

ζ(-n, z) = (-1)^n B_{n+1}(1-z)/(n+1) = -B_{n+1}(z)/(n+1)

(using the property B_n(1-z) = (-1)^n B_n(z), so that (-1)^n B_{n+1}(1-z) = -B_{n+1}(z)). In particular, ζ(0, z) = -B_1(z) = 1/2 - z. For the Bernoulli numbers B_n = B_n(0) = (-1)^n B_n(1) we have

ζ(-n) = ζ(-n, 1) = -B_{n+1}(1)/(n+1) = (-1)^n B_{n+1}/(n+1).

The odd Bernoulli numbers are zero except B_1 = -1/2. Thus ζ(0) = -1/2.
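These values can be spot-checked numerically (our own sketch, not from the paper): mpmath's zeta(s, a) is the Hurwitz zeta function, continued to negative s, and sympy's bernoulli(n, z) is the Bernoulli polynomial.

```python
import mpmath as mp
from sympy import bernoulli, Rational

# zeta(-n, z) = -B_{n+1}(z)/(n+1), checked at z = 1/3 for small n
z = Rational(1, 3)
for n in range(6):
    lhs = mp.zeta(-n, mp.mpf(1)/3)        # Hurwitz zeta continued to s = -n
    rhs = -bernoulli(n + 1, z)/(n + 1)    # exact rational value
    assert mp.almosteq(lhs, float(rhs))

assert mp.almosteq(mp.zeta(0), mp.mpf(-1)/2)   # zeta(0) = -1/2
```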
Next we give a new proof of a result of Kenneth Williams and Zhang Nan-Yue [14].

Example 3. Consider now the alternating Hurwitz zeta function

η(s, z) = Σ_{n=0}^∞ (-1)^n/(n + z)^s    (Re(z) > 0, Re(s) > 0),

which extends to the entire complex plane as an analytic function of the variable s and has the integral representation

η(s, z) Γ(s) = ∫_0^∞ (e^{(1-z)t}/(e^t + 1)) t^{s-1} dt    (Re(s) > 0).

By inversion,

e^{(1-z)t}/(e^t + 1) = (1/(2πi)) ∫_{(a)} t^{-s} η(s, z) Γ(s) ds = Σ_{n=0}^∞ ((-1)^n/n!) η(-n, z) t^n.

Euler's polynomials E_n(z) are defined by the generating function

2 e^{tx}/(e^t + 1) = Σ_{n=0}^∞ E_n(x) t^n/n!    (|t| < π).

Comparing coefficients gives

η(-n, z) = ((-1)^n/2) E_n(1-z) = (1/2) E_n(z)

by the property E_n(1-z) = (-1)^n E_n(z).
Example 4. Euler worked with the function

L(s) = Σ_{n=0}^∞ (-1)^n/(2n + 1)^s    (Re(s) > 0),

which we call here Euler's L-function (sometimes it is called Dirichlet's L-function). This function has the integral representation

2 Γ(s) L(s) = ∫_0^∞ (x^{s-1}/cosh x) dx.

It also has an analytic extension to the complex plane. By Mellin inversion and using equation (4),

1/(2 cosh x) = (1/(2πi)) ∫_{(a)} x^{-s} L(s) Γ(s) ds = Σ_{n=0}^∞ ((-1)^n/n!) L(-n) x^n.

Euler's numbers E_n are defined by the generating function

1/cosh x = Σ_{n=0}^∞ E_n x^n/n!.

This function is even, so the Euler numbers with odd indices are zero. By comparing coefficients we find

L(-2n) = E_{2n}/2    (n = 0, 1, 2, ...).
Example 5. The exponential polynomials φ_n are defined by the generating function

e^{z(e^x - 1)} = Σ_{n=0}^∞ φ_n(z) x^n/n!

(see [3, 12]). We will use the function

ψ(x, z) = e^{z(e^{-x} - 1)} = Σ_{n=0}^∞ ((-1)^n/n!) φ_n(z) x^n,

which has Mellin transform

Ψ(s, z) = ∫_0^∞ x^{s-1} ψ(x, z) dx = e^{-z} ∫_0^∞ x^{s-1} e^{z e^{-x}} dx
        = e^{-z} Σ_{n=0}^∞ (z^n/n!) ∫_0^∞ x^{s-1} e^{-nx} dx = Γ(s) e^{-z} Σ_{n=0}^∞ z^n/(n! n^s) = Γ(s) f(s, z)

with

f(s, z) = e^{-z} Σ_{n=0}^∞ z^n/(n! n^s).

Now we have from equation (4)

ψ(x, z) = (1/(2πi)) ∫_{(a)} x^{-s} Ψ(s, z) ds = (1/(2πi)) ∫_{(a)} x^{-s} f(s, z) Γ(s) ds = Σ_{n=0}^∞ ((-1)^n/n!) f(-n, z) x^n.

Thus for n ≥ 0 (with the agreement 0^0 = 1)

φ_n(z) = f(-n, z) = e^{-z} Σ_{k=0}^∞ k^n z^k/k!,    (6)

which is one of the fundamental properties of the exponential polynomials.

Note that identity (6) can be used for a meaningful extension of φ_n(z) to φ_λ(z) with non-integer index λ. For instance, we have (with 0^λ = 0)

φ_λ(z) = e^{-z} Σ_{k=1}^∞ k^λ z^k/k!.
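Identity (6) is easy to test numerically (our own sketch, assuming mpmath and sympy): sympy's bell(n, x) evaluates the Bell (exponential) polynomial φ_n(x), and the series on the right of (6) is summed with mpmath.

```python
import mpmath as mp
from sympy import bell, Rational

# Identity (6): phi_n(z) = exp(-z) * sum_{k>=0} k^n z^k / k!   (0^0 = 1)
z = mp.mpf('0.7')
for n in range(6):
    series = mp.nsum(lambda k: k**n * z**k / mp.factorial(k), [0, mp.inf])
    exact = float(bell(n, Rational(7, 10)))   # Bell (exponential) polynomial
    assert mp.almosteq(mp.exp(-z)*series, exact)
```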
Example 6. The generating function for the Hermite polynomials is

ψ(x, z) = e^{2xz - x²} = Σ_{n=0}^∞ H_n(z) x^n/n!,

with Mellin transform [11, p. 27]

Ψ(s, z) = 2^{-s/2} e^{z²/2} D_{-s}(-√2 z) Γ(s),

where the D_p are the parabolic cylinder functions [7, pp. 1065-1067]. Let now

f(s, z) = 2^{-s/2} e^{z²/2} D_{-s}(-√2 z).

Then H_n(z) = (-1)^n f(-n, z), and since H_n(-z) = (-1)^n H_n(z) (and likewise D_n(-u) = (-1)^n D_n(u)), one finds the classical result

H_n(z) = 2^{n/2} e^{z²/2} D_n(√2 z)

([7, entry 9.253, p. 1067]).
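The classical relation can be verified directly (our own sketch, assuming mpmath): mpmath provides both H_n (hermite) and D_ν (pcfd, the parabolic cylinder function D in Whittaker's notation).

```python
import mpmath as mp

# H_n(z) = 2^(n/2) * exp(z^2/2) * D_n(sqrt(2)*z)
z = mp.mpf('0.9')
for n in range(8):
    lhs = mp.hermite(n, z)
    rhs = 2**(mp.mpf(n)/2) * mp.exp(z**2/2) * mp.pcfd(n, mp.sqrt(2)*z)
    assert mp.almosteq(lhs, rhs)
```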
6. CONCLUSIONS

In this note we presented a rule for how Mellin's transform can be used to give short proofs of several classical identities connecting, in particular, the Bernoulli and Euler polynomials to the values of the Hurwitz and alternating Hurwitz functions at the negative integers. We also proved identities for the exponential and Hermite polynomials.
REFERENCES

[1] T. Amdeberhan, I. Gonzalez, M. Harrison, V. H. Moll, A. Straub, "Ramanujan's Master Theorem", The Ramanujan Journal, 29, 103-120 (2012).
[2] B. C. Berndt, Ramanujan's Notebooks, Parts I and II, Springer-Verlag, New York (1985, 1989).
[3] K. N. Boyadzhiev, "Exponential polynomials, Stirling numbers, and evaluation of some Gamma integrals", Abstract and Applied Analysis, Article ID 168672 (2009).
[4] G. Dahlquist, "On summation formulas due to Plana, Lindelöf and Abel, and related Gauss-Christoffel rules, I", BIT Numer. Math., 37(2), 256-295 (1997); "On summation formulas due to Plana, Lindelöf and Abel, and related Gauss-Christoffel rules, II", BIT Numer. Math., 37(4), 804-832 (1997); "On Summation Formulas Due to Plana, Lindelöf and Abel, and Related Gauss-Christoffel Rules, III", BIT Numer. Math., 39, 51-78 (1999).
[5] H. M. Edwards, Riemann's Zeta Function, Academic Press, Boston (1974).
[6] I. González, V. H. Moll, I. Schmidt, "A generalized Ramanujan Master Theorem applied to the evaluation of Feynman diagrams", Adv. Appl. Math., 63, 214-230 (2015).
[7] I. S. Gradshteyn and I. M. Ryzhik, Table of Integrals, Series, and Products, Academic Press (1980).
[8] G. H. Hardy, Ramanujan, Cambridge University Press (1940).
[9] E. Lindelöf, Le Calcul des Résidus et ses Applications à la Théorie des Fonctions, Gauthier-Villars, Paris (1905).
[10] D. S. Mitrinovic and J. D. Keckic, The Cauchy Method of Residues, D. Reidel Publ. Co., Dordrecht/Boston (1984).
[11] F. Oberhettinger, Tables of Mellin Transforms, Springer-Verlag, New York (1974).
[12] Gian-Carlo Rota, Finite Operator Calculus, Academic Press, New York (1975).
[13] D. V. Widder, An Introduction to Transform Theory, Academic Press, New York (1971).
[14] K. S. Williams and Z. Nan-Yue, "Special values of the Lerch zeta function and the evaluation of certain integrals", Proc. Amer. Math. Soc., 119, 35-49 (1993).
[15] W. Heap, Notes on the gamma function and the Riemann zeta function (online publication). https://wiki.math.ntnu.no/_media/ma3001/2014v/analytisktallteori/the_riemann_zeta_function_notes.pdf
UtAzT4oBgHgl3EQf0_7l/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf,len=193
2
+ page_content='MATHEMATICA MONTISNIGRI 2010 Mathematics Subject Classification: 30B10, 11M05, 11M41, 33B15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
3
+ page_content=' Key words and Phrases: Riemann’s zeta function, Lerch transcendent, polylogarithm, digamma function, Euler’s constant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
4
+ page_content=' SEVERAL CLASSICAL IDENTITIES VIA MELLIN’S TRANSFORM KHRISTO N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
5
+ page_content=' BOYADZHIEV Department of mathematics, Ohio Northern University Ada, Ohio, 45810, USA E-mail: k-boyadzhiev@onu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
6
+ page_content='edu DOI: Summary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
7
+ page_content=' We present a summation rule using Mellin’s transform to give short proofs of some important classical relations between special functions and Bernoulli and Euler polynomials.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
8
+ page_content=' For example, the values of the Hurwitz zeta function at the negative integers are expressed in terms of Bernoulli polynomials.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
9
+ page_content=' We also show identities involving exponential and Hermite polynomials.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
10
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
11
+ page_content=' INTRODUCTION Throughout we use the notation ( ) , a a it t = + ∈\uf0a1 for the vertical line with abscissa 0 1 a < < , oriented from minus to plus infinity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
12
+ page_content=' First we recall the formulas for the Mellin transform 1 0 ( ) ( ) s G s x g x dx ∞ − = ∫ and its inverse ( ) 1 ( ) ( ) , 0 2 s a g x x G s ds x i π − = > ∫ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
13
+ page_content=' (1) Definition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
14
+ page_content=' For 0 n > consider integrals of the form ( ) ( ) , 0 s L n x G s ds x − > ∫ where ( ) L n consists of the line segment [ , ] ni ni − together with the semicircle ( ) R n in the left half plane for which the line segment is the diagonal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
15
+ page_content=' If the integrals ( ) ( ) s R n x G s ds − ∫ approach zero when n → ∞, we say that the line of integration in (1) can be closed to the left.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
16
+ page_content=' In a similar manner we define integrals where the line of integration can be closed to the right.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
17
+ page_content=' Proposition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
18
+ page_content=' Suppose that the function ( ) G s is meromorphic on the half plane ( ) Re s a ε < + for some small 0 ε > , and has only simple poles at 0, 1, 2,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
19
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
20
+ page_content=' s = − − , with residues 0 1 2 , , ,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
21
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
22
+ page_content=' c c c .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
23
+ page_content=' If the line of integration in (1) can be closed to the left, the residue theorem provides the representation Khristo N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
24
+ page_content=' Boyadzhiev 0 ( ) n n n g x c x ∞ = = ∑ (2) for the function ( ) g x from (1), i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
25
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
26
+ page_content=' this function is a power series.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
27
+ page_content=' If now ( ) f s is an appropriate holomorphic function on ( ) Re s a ε < + without poles, we can write ( ) 0 1 ( ) ( ) ( ) 2 s n n a n x f s G s ds c f n x i π ∞ − = = − ∑ ∫ , (3) when the power series on the right side converges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
28
+ page_content=' Formulas of this type are used for summation of series or interpolation, and are present in many publications (see [2, 4, 9, 10] and the references there).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
29
+ page_content=' In this note we focus on a special area of applications for the proposition - obtaining some classical identities by using Mellin inversion and comparing coefficients.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
30
+ page_content=' In order to keep the paper short we omit details and do not discuss convergence of some integrals and series.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
31
+ page_content=' The validity of such formulas is considered in [4, 9, 10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
32
+ page_content=' The illustration of the method is given in the following examples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/UtAzT4oBgHgl3EQf0_7l/content/2301.01794v1.pdf'}
33
2. EXAMPLES

Recall that the residues of the gamma function at zero and at the negative integers are given by
$$\operatorname{Res}(\Gamma, -n) = \frac{(-1)^n}{n!} \qquad \text{for } n = 0, 1, 2, \ldots$$
Also, $\Gamma(s)$ has rapid decay on vertical lines. We have the estimate [15, (20)]
$$\Gamma(a + it) \sim \sqrt{2\pi}\,|t|^{\,a - \frac{1}{2}}\, e^{-\pi |t|/2}, \qquad a \in \mathbb{R},$$
when $|t| \to \infty$. The estimate helps with the convergence of our integrals. The above facts will be used in the following examples. We note that in these examples the lines of integration can be closed to the left (the proofs are standard, using the growth estimate for $\Gamma(s)$). Note also that when we replace $G(s)$ by $\Gamma(s)$ in (3) we have
$$\frac{1}{2\pi i}\int_{(a)} x^{-s} f(s)\,\Gamma(s)\, ds \;=\; \sum_{n=0}^{\infty} \frac{(-1)^n}{n!}\, f(-n)\, x^n. \qquad (4)$$

Example 1. Ramanujan's Master Theorem. As Hardy writes in [8], Ramanujan was very fond of his integral formula [8, p. 186]
$$\int_0^\infty x^{s-1}\left\{\sum_{n=0}^{\infty} \frac{f(n)(-x)^n}{n!}\right\} dx = \Gamma(s)\, f(-s) \qquad (5)$$
and used it in many applications. Berndt rightly calls it Ramanujan's Master Theorem [2, Entry 11, p. 105]. Details, comments, and applications of (5) are given in these two books and also in [1, 4, 6]. Clearly, after replacing $f(s)$ by $f(-s)$, equation (5) turns into (4) after Mellin inversion. Ramanujan did not use the residue theorem for his proof but only standard calculus (see [2, p. 106]).
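Identity (5) is easy to test numerically. The sketch below is our addition, not part of the original note; the choice $f(n) = 1/(n+1)$ is ours. For it the power series sums to $(1 - e^{-x})/x$, and the theorem predicts $\Gamma(s)\,f(-s) = \Gamma(s)/(1-s)$ in the strip $0 < \operatorname{Re} s < 1$.

```python
# Numerical spot-check of Ramanujan's Master Theorem (5) for f(n) = 1/(n+1):
# sum_{n>=0} f(n)(-x)^n/n! = (1 - exp(-x))/x, and (5) then gives
# Gamma(s) f(-s) = Gamma(s)/(1 - s) for 0 < Re s < 1.
import mpmath as mp

mp.mp.dps = 30                        # working precision, decimal digits

def series_side(x):
    return (1 - mp.exp(-x))/x         # closed form of the power series

for s in [mp.mpf('0.3'), mp.mpf('0.5'), mp.mpf('0.75')]:
    lhs = mp.quad(lambda x: x**(s - 1)*series_side(x), [0, mp.inf])
    rhs = mp.gamma(s)/(1 - s)         # Gamma(s) * f(-s)
    print(s, mp.nstr(lhs - rhs, 5))   # differences should be ~0
```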
Example 2. The Hurwitz zeta function is defined by
$$\zeta(s,z) = \sum_{n=0}^{\infty} \frac{1}{(n+z)^s} \qquad (\operatorname{Re} z > 0,\ \operatorname{Re} s > 1)$$
with the integral representation
$$\Gamma(s)\,\zeta(s,z) = \int_0^\infty \frac{e^{(1-z)t}}{e^t - 1}\, t^{s-1}\, dt \qquad (\operatorname{Re} s > 1).$$
When $z = 1$, $\zeta(s,1) = \zeta(s)$ is the Riemann zeta function [5]. We also have the modified integral representation (the argument is the same as on pp. 61-62 in [13])
$$\Gamma(s)\,\zeta(s,z) = \int_0^\infty \left(\frac{e^{(1-z)t}}{e^t - 1} - \frac{1}{t}\right) t^{s-1}\, dt \qquad (0 < \operatorname{Re} s < 1),$$
which is a Mellin transform formula. By Mellin inversion,
$$\frac{e^{(1-z)t}}{e^t - 1} - \frac{1}{t} = \frac{1}{2\pi i}\int_{(a)} t^{-s}\,\Gamma(s)\,\zeta(s,z)\, ds = \sum_{n=0}^{\infty} \frac{(-1)^n}{n!}\,\zeta(-n,z)\, t^n.$$
At the same time, the Bernoulli polynomials $B_n(z)$ have the generating function
$$\frac{x e^{xz}}{e^x - 1} = \sum_{k=0}^{\infty} B_k(z)\,\frac{x^k}{k!}$$
and from this
$$g(x,z) \equiv \frac{e^{(1-z)x}}{e^x - 1} - \frac{1}{x} = \sum_{k=1}^{\infty} \frac{B_k(1-z)}{k!}\, x^{k-1} = \sum_{n=0}^{\infty} \frac{B_{n+1}(1-z)}{(n+1)!}\, x^n.$$
Therefore, by comparing coefficients for $n = 0, 1, \ldots$ we find the classical formula
$$\zeta(-n,z) = -\frac{(-1)^{n+1} B_{n+1}(1-z)}{n+1} = -\frac{B_{n+1}(z)}{n+1}$$
(using the property $B_n(1-z) = (-1)^n B_n(z)$, so that $(-1)^{n+1} B_{n+1}(1-z) = B_{n+1}(z)$). In particular, $\zeta(0,z) = \frac{1}{2} - z = -B_1(z)$. For the Bernoulli numbers $B_n = B_n(0) = (-1)^n B_n(1)$ we have
$$\zeta(-n) = \zeta(-n,1) = -\frac{(-1)^{n+1} B_{n+1}(0)}{n+1} = -\frac{(-1)^{n+1} B_{n+1}}{n+1}.$$
The odd-index Bernoulli numbers are zero except $B_1 = -1/2$. Thus $\zeta(0) = -1/2$.
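This identity can be verified directly; the following sketch (ours, not part of the paper) checks $\zeta(-n,z) = -B_{n+1}(z)/(n+1)$ with mpmath's Hurwitz zeta function and Bernoulli polynomials.

```python
# Check zeta(-n, z) = -B_{n+1}(z)/(n+1) for small n and a few values of z.
import mpmath as mp

mp.mp.dps = 25
for n in range(6):
    for z in [mp.mpf('0.5'), mp.mpf(1), mp.mpf('2.3')]:
        lhs = mp.zeta(-n, z)                   # Hurwitz zeta at s = -n
        rhs = -mp.bernpoly(n + 1, z)/(n + 1)   # -B_{n+1}(z)/(n+1)
        assert mp.almosteq(lhs, rhs), (n, z)
print("zeta(-n, z) = -B_{n+1}(z)/(n+1) holds numerically")
```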
Next we give a new proof of a result of Kenneth Williams and Zhang Nan-Yue [14].

Example 3. Consider now the alternating Hurwitz zeta function
$$\eta(s,z) = \sum_{n=0}^{\infty} \frac{(-1)^n}{(n+z)^s} \qquad (\operatorname{Re} z > 0,\ \operatorname{Re} s > 0),$$
which extends to the entire complex plane as an analytic function of the variable $s$ and has the integral representation
$$\Gamma(s)\,\eta(s,z) = \int_0^\infty \frac{e^{(1-z)t}}{e^t + 1}\, t^{s-1}\, dt \qquad (\operatorname{Re} s > 0).$$
By inversion,
$$\frac{e^{(1-z)t}}{e^t + 1} = \frac{1}{2\pi i}\int_{(a)} t^{-s}\,\Gamma(s)\,\eta(s,z)\, ds = \sum_{n=0}^{\infty} \frac{(-1)^n}{n!}\,\eta(-n,z)\, t^n.$$
Euler's polynomials $E_n(z)$ are defined by the generating function
$$\frac{2 e^{tx}}{e^t + 1} = \sum_{n=0}^{\infty} E_n(x)\,\frac{t^n}{n!} \qquad (|t| < \pi).$$
Comparing coefficients gives
$$\eta(-n,z) = \frac{(-1)^n}{2}\, E_n(1-z) = \frac{1}{2}\, E_n(z)$$
by the property $E_n(1-z) = (-1)^n E_n(z)$.
Example 4. Euler worked with the function
$$L(s) = \sum_{n=0}^{\infty} \frac{(-1)^n}{(2n+1)^s} \qquad (\operatorname{Re} s > 0),$$
which we call here Euler's $L$-function (sometimes it is called Dirichlet's $L$-function). This function has the integral representation
$$\Gamma(s)\, L(s) = \int_0^\infty \frac{x^{s-1}}{2\cosh x}\, dx.$$
It also has an analytic extension to the complex plane. By Mellin inversion and using equation (4),
$$\frac{1}{2\cosh x} = \frac{1}{2\pi i}\int_{(a)} x^{-s}\,\Gamma(s)\, L(s)\, ds = \sum_{n=0}^{\infty} \frac{(-1)^n}{n!}\, L(-n)\, x^n.$$
Euler's numbers $E_n$ are defined by the generating function
$$\frac{1}{\cosh x} = \sum_{n=0}^{\infty} E_n\,\frac{x^n}{n!}.$$
This function is even, so the Euler numbers with odd indices are zero. By comparing coefficients we find
$$L(-2n) = \frac{E_{2n}}{2} \qquad (n = 0, 1, 2, \ldots).$$
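A numerical check (our addition): writing $L(s) = 4^{-s}\left[\zeta(s, 1/4) - \zeta(s, 3/4)\right]$ in terms of the Hurwitz zeta function, one can test $L(-2n) = E_{2n}/2$ directly.

```python
# Check L(-2n) = E_{2n}/2, computing L(s) via Hurwitz zeta functions
# and the Euler numbers via sympy.
import mpmath as mp
from sympy import euler

def L(s):
    return 4**(-s)*(mp.zeta(s, mp.mpf(1)/4) - mp.zeta(s, mp.mpf(3)/4))

for n in range(5):
    lhs = L(-2*n)
    rhs = mp.mpf(int(euler(2*n)))/2      # E_{2n}/2
    assert mp.almosteq(lhs, rhs), n
print("L(-2n) = E_{2n}/2 holds numerically")
```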
Example 5. The exponential polynomials $\varphi_n$ are defined by the generating function
$$e^{z(e^x - 1)} = \sum_{n=0}^{\infty} \varphi_n(z)\,\frac{x^n}{n!}$$
(see [3, 12]). We will use the function
$$\psi(x,z) = e^{z(e^{-x} - 1)} = \sum_{n=0}^{\infty} \frac{(-1)^n \varphi_n(z)}{n!}\, x^n,$$
which has the Mellin transform
$$\Psi(s,z) = \int_0^\infty x^{s-1}\,\psi(x,z)\, dx = e^{-z}\int_0^\infty x^{s-1} e^{z e^{-x}}\, dx = e^{-z}\sum_{n=0}^{\infty} \frac{z^n}{n!}\int_0^\infty x^{s-1} e^{-nx}\, dx = \Gamma(s)\, e^{-z}\sum_{n=0}^{\infty} \frac{z^n}{n!\, n^s} = \Gamma(s)\, f(s,z)$$
with
$$f(s,z) = e^{-z}\sum_{n=0}^{\infty} \frac{z^n}{n!\, n^s}.$$
Now we have from equation (4)
$$\psi(x,z) = \frac{1}{2\pi i}\int_{(a)} x^{-s}\,\Psi(s,z)\, ds = \frac{1}{2\pi i}\int_{(a)} x^{-s} f(s,z)\,\Gamma(s)\, ds = \sum_{n=0}^{\infty} \frac{(-1)^n}{n!}\, f(-n,z)\, x^n.$$
Thus for $n \geq 0$ (with the agreement $0^0 = 1$)
$$\varphi_n(z) = f(-n,z) = e^{-z}\sum_{k=0}^{\infty} \frac{k^n z^k}{k!}, \qquad (6)$$
which is one of the fundamental properties of the exponential polynomials. Note that identity (6) can be used for a meaningful extension of $\varphi_n(z)$ to $\varphi_\lambda(z)$ with non-integer index $\lambda$. For instance, we have (with $0^\lambda = 0$)
$$\varphi_\lambda(z) = e^{-z}\sum_{k=1}^{\infty} \frac{k^\lambda z^k}{k!}.$$
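Identity (6) is a Dobinski-type formula and is easy to test. The sketch below (ours) builds $\varphi_n(z)$ from the recurrence $\varphi_{n+1}(z) = z\left(\varphi_n(z) + \varphi_n'(z)\right)$, which follows from the generating function, and compares it with a truncation of the series in (6).

```python
# Compare phi_n(z) from the generating-function recurrence
# phi_{n+1}(z) = z*(phi_n(z) + phi_n'(z)) with the series in (6):
# phi_n(z) = exp(-z) * sum_{k>=0} k^n z^k / k!   (with 0^0 = 1).
import math

def phi_coeffs(n):
    # coefficient list of phi_n(z); index = power of z
    c = [1]                              # phi_0 = 1
    for _ in range(n):
        d = [0]*(len(c) + 1)
        for k, a in enumerate(c):        # apply z*(phi + phi')
            d[k + 1] += a                # z * (a z^k)
            if k >= 1:
                d[k] += k*a              # z * (a k z^{k-1})
        c = d
    return c

def phi_series(n, z, terms=200):
    s = sum(k**n * z**k / math.factorial(k) for k in range(terms))
    return math.exp(-z)*s                # Python already sets 0**0 == 1

n, z = 5, 1.7
poly = sum(a*z**k for k, a in enumerate(phi_coeffs(n)))
print(poly, phi_series(n, z))            # the two values agree
```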
Example 6. The generating function for the Hermite polynomials is
$$\psi(x,z) = e^{2xz - x^2} = \sum_{n=0}^{\infty} H_n(z)\,\frac{x^n}{n!}$$
with Mellin transform [11, p. 27]
$$\Psi(s,z) = 2^{-s/2}\, e^{z^2/2}\, D_{-s}(z\sqrt{2}\,)\,\Gamma(s),$$
where $D_p$ are the parabolic cylinder functions [7, pp. 1065-1067]. Let now
$$f(s,z) = 2^{-s/2}\, e^{z^2/2}\, D_{-s}(z\sqrt{2}\,).$$
Then $H_n(z) = (-1)^n f(-n,z)$ and, since $H_n(-z) = (-1)^n H_n(z)$, one finds the classical result
$$H_n(z) = 2^{n/2}\, e^{z^2/2}\, D_n(z\sqrt{2}\,)$$
([7, entry 9.253, p. 1067]).
3. CONCLUSIONS

In this note we presented a rule for how the Mellin transform can be used to give short proofs of several classical identities connecting, in particular, the Bernoulli and Euler polynomials to the values of the Hurwitz and alternating Hurwitz zeta functions at the negative integers. We also proved identities for the exponential and Hermite polynomials.
REFERENCES

[1] T. Amdeberhan, I. Gonzalez, M. Harrison, V. H. Moll, A. Straub, "Ramanujan's Master Theorem", The Ramanujan Journal, 29, 103-120 (2012).
[2] B. C. Berndt, Ramanujan's Notebooks, Parts I and II, Springer-Verlag, New York (1985, 1989).
[3] K. N. Boyadzhiev, "Exponential polynomials, Stirling numbers, and evaluation of some Gamma integrals", Abstract and Applied Analysis, Article ID 168672 (2009).
[4] G. Dahlquist, "On summation formulas due to Plana, Lindelöf and Abel, and related Gauss-Christoffel rules, I", BIT Numer. Math., 37(2), 256-295 (1997); "On summation formulas due to Plana, Lindelöf and Abel, and related Gauss-Christoffel rules, II", BIT Numer. Math., 37(4), 804-832 (1997); "On summation formulas due to Plana, Lindelöf and Abel, and related Gauss-Christoffel rules, III", BIT Numer. Math., 39, 51-78 (1999).
[5] H. M. Edwards, Riemann's Zeta Function, Academic Press, Boston (1974).
[6] I. González, V. H. Moll, I. Schmidt, "A generalized Ramanujan Master Theorem applied to the evaluation of Feynman diagrams", Adv. Appl. Math., 63, 214-230 (2015).
[7] I. S. Gradshteyn and I. M. Ryzhik, Table of Integrals, Series, and Products, Academic Press (1980).
[8] G. H. Hardy, Ramanujan, Cambridge University Press (1940).
[9] E. Lindelöf, Le Calcul des Résidus et ses Applications à la Théorie des Fonctions, Gauthier-Villars, Paris (1905).
[10] D. S. Mitrinovic and J. D. Keckic, The Cauchy Method of Residues, D. Reidel Publ. Co., Dordrecht/Boston (1984).
[11] F. Oberhettinger, Tables of Mellin Transforms, Springer-Verlag, New York (1974).
[12] Gian-Carlo Rota, Finite Operator Calculus, Academic Press, New York (1975).
[13] D. V. Widder, An Introduction to Transform Theory, Academic Press, New York (1971).
[14] K. S. Williams and Z. Nan-Yue, "Special values of the Lerch zeta function and the evaluation of certain integrals", Proc. Amer. Math. Soc., 119, 35-49 (1993).
[15] W. Heap, Notes on the gamma function and the Riemann zeta function (online publication). https://wiki.math.ntnu.no/_media/ma3001/2014v/analytisktallteori/the_riemann_zeta_function_notes.pdf
W9E1T4oBgHgl3EQfvgXD/content/tmp_files/2301.03401v1.pdf.txt ADDED
A reappraisal of the principle of equivalent time based on physicochemical methods

M. Rufino, A.L. Lixandrão-Filho and S. Guedes*

Departamento de Raios Cósmicos e Cronologia, Grupo de Cronologia, Instituto de Física "Gleb Wataghin", Universidade Estadual de Campinas, R. Sérgio Buarque de Holanda, 777 - Cidade Universitária, Campinas - SP, 13083-859, Brazil

arXiv:2301.03401v1 [physics.geo-ph] 6 Jan 2023

* Principal corresponding author. E-mail: [email protected] (M. Rufino); [email protected] (A.L. Lixandrão-Filho); [email protected] (S. Guedes). ORCID(s): 0000-0003-4871-5120 (M. Rufino); 0000-0002-8343-8942 (A.L. Lixandrão-Filho); 0000-0002-7753-8584 (S. Guedes)

ARTICLE INFO

Keywords: Fission-track thermochronology; Equivalent time; Effective rate constant; Physicochemical techniques

ABSTRACT

The main feature of Fission-Track Thermochronology is its ability to infer the thermal histories of mineral samples in regions of interest for geological studies. The ingredients that make thermal history inference possible are the annealing models, which capture the annealing kinetics of fission tracks in isothermal heating experiments, and the Principle of Equivalent Time (PET), which allows the application of the annealing models to variable temperatures. It turns out that the PET only applies to specific types of annealing models describing single activation energy annealing mechanisms (parallel models). However, the PET has been extensively applied to models related to multiple activation energy mechanisms (fanning models). This procedure is an approximation that has been overlooked due to the lack of a suitable alternative. To deal with this difficulty, a formalism based on physicochemical techniques is developed that allows the effects of annealing on fission tracks to be quantified for variable temperatures. It is independent of the annealing mechanism and, therefore, applicable to any annealing model. In the cases in which the PET is valid (parallel models), the proposed method and the PET predict the same degrees of annealing. However, deviations appear when the methods are applied to the fanning models, with the PET underestimating annealing effects. The consequences for the inference of thermal histories are discussed.
1. Introduction

The Principle of Equivalent Time (PET) is one of the basic ingredients for the inference of thermal histories in Fission-Track Thermochronology (FTT). The PET states that the rate of track shortening due to temperature is independent of the track's previous thermal history. Thus, any thermal history may be replaced with a constant temperature heating for an equivalent time resulting in the current fission-track length. Since the models that constrain the annealing kinetics are only applicable to constant temperature heating events, it is the PET that allows for the inference of variable thermal histories from the fission-track age and from the distribution of fission-track lengths measured in the sample.

The PET was first proposed by Goswami et al. (1984). Later on, Duddy et al. (1988) established a practical method of finding thermal histories, applying the PET, that has remained mostly unchanged since then. They also demonstrated that the PET is only valid for the single activation energy Arrhenius annealing equation. Such an equation is represented as parallel straight isoretention (same fission-track length) curves in the pseudo-Arrhenius space (logarithm of time as a function of inverse temperature, Fig. 1a) and is called the Parallel Arrhenius equation. However, they applied the PET to the Fanning Arrhenius (FA) model (Laslett et al., 1987), in which the isoretention curves diverge from a single point with different slopes, implying different activation energies. They recognized that this procedure is an approximation, since the PET only applies to parallel models, but argued that their fanning model deviated only slightly from a parallel one.

Annealing models continued to evolve. Carlson (1990) presented a modified version of the parallel model (Fig. 1a). Crowley et al. (1991) proposed versions of the Parallel and Fanning equations that are curved in the pseudo-Arrhenius space (Fig. 1b), implying activation energies that vary with the temperature of annealing. The Fanning Curvilinear (FC) model has been shown to produce better geological extrapolations than the other models for the apatite (Ketcham et al., 2007) and zircon (Guedes et al., 2013) fission-track systems. It is currently the model of choice in most geological studies using the FTT (Ketcham, 2019) in thermal history codes relying on the PET to apply the annealing equations for the inference of thermal histories (Ketcham, 2005; Gallagher, 2012). The joint application of the PET and the FC equation is an approximation that has been overlooked due to the lack of an alternative to deal with variable temperature thermal histories.
Recently, Rufino and Guedes (2022) applied a physicochemical technique to the fission-track Arrhenius equations and were able to formulate the annealing kinetics in terms of the reaction rate constant, which is the fundamental quantity related to the activation energy (Arrhenius, 1889; Cohen et al., 2007). The reaction for the annealing process is the recombination of the displaced atoms and vacant sites that form the track. Once the rate constant is determined for the annealing equations, they can be represented in the Arrhenius space (logarithm of the rate constant as a function of the inverse temperature) and their trends can be used to retrieve the general mechanisms underlying the Arrhenius models. The rate constant encodes the most fundamental features of annealing. Once it is determined, the shortening of the fission tracks may in principle be quantified not only for constant temperatures but also for varying ones, providing an alternative to the PET without the single activation energy restriction.

The Arrhenius annealing equations can be derived from the rate constant for the case of constant temperature annealing (Rufino and Guedes, 2022). The next step is to apply the physicochemical approach to the variable temperature annealing of the fission tracks and to compare the results with the ones obtained with the PET. The length shortening of fission tracks is calculated for cooling temperature-time (T-t) paths with different slopes using the parallel, fanning and Carlson models, as they are the representations of different activation energy mechanisms. The temperature indexes, Closure and Total Annealing temperatures, calculated using the PET and the rate constant techniques, are presented and compared to illustrate the differences between the two approaches for the geological extrapolation.
2. Method

2.1. The physicochemical perspective of fission track annealing

The kinetics of chemical reactions can be described by the Arrhenius equation (Arrhenius, 1889), which relates the temperature derivative of the reaction rate $k$, the universal gas constant $R$ and a constant $q$ related to a change in the standard internal energy (Laidler, 1984, p. 494):
$$\frac{d \ln k(T)}{dT} = \frac{q}{RT^2}. \qquad (1)$$

Eq. (1) can be solved for the reaction rate as a function of temperature $k(T)$ using a pre-exponential factor $A$ and the Arrhenius activation energy $E_a$:
$$k(T) = A \exp\left(-E_a/RT\right). \qquad (2)$$

Chemical processes that obey Eq. (2) result in straight lines with slope $-E_a/R$ in Arrhenius plots ($\ln k \times 1/T$). Among the fission-track annealing models, the Parallel Arrhenius is the only one that actually fits this formulation of a single constant activation energy. Deviations from Eq. (2) are quite common. To enable a more complete study of chemical reactions, the International Union of Pure and Applied Chemistry (IUPAC) has defined the Arrhenius activation energy (Cohen et al., 2007):
$$E_a = -R\,\frac{d \ln k}{d(1/T)}. \qquad (3)$$

The Arrhenius activation energy, as defined by Eq. (3), is an empirical quantity intended as a kinetic parameter that can vary with the temperature of the reaction medium. Its determination depends on prior knowledge of the rate constant, the quantity that encodes the reaction kinetics. Thus, for application to the fission-track system, the reaction rate constant associated with the annealing mechanisms must be found, which can be done using the formalism of studies of solid state processes (Vyazovkin, 2015).
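To make Eq. (3) concrete, the short sketch below (our illustration, not part of the original paper) recovers the activation energy of a made-up Arrhenius rate constant by numerically differentiating $\ln k$ with respect to $1/T$.

```python
# Recover E_a from a rate constant via the IUPAC definition, Eq. (3).
# The k(T) below is a hypothetical Arrhenius law, used only as a test case.
import numpy as np

R = 1.98720425e-3                   # kcal mol^-1 K^-1
A, Ea_true = 2.0e7, 40.0            # assumed pre-factor and E_a (kcal/mol)

def k(T):
    return A*np.exp(-Ea_true/(R*T))

def Ea_numeric(T, h=1e-8):
    x = 1.0/T                       # differentiate ln k with respect to 1/T
    dlnk = (np.log(k(1/(x + h))) - np.log(k(1/(x - h))))/(2*h)
    return -R*dlnk                  # Eq. (3)

print(Ea_numeric(400.0))            # ~40.0 kcal/mol at any T: a straight line
```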
The annealing kinetics of fission tracks is described by empirical (Laslett et al., 1987; Crowley et al., 1991; Laslett and Galbraith, 1996; Rana et al., 2021) or semi-empirical (Carlson, 1990; Guedes et al., 2006, 2013) equations relating the reduced track length, $r = L/L_0$ (where $L$ is the length of the fission track after heating and $L_0$ is the unannealed fission-track length), to the duration, $t$, of the constant temperature ($T$) heating. The general form of the annealing equations is:
$$g(r) = f(t,T), \qquad (4)$$
in which $g(r)$ is a transformation of $r$ and $f(t,T)$ defines the geometrical characteristics of the isoretention curves in the pseudo-Arrhenius space ($\ln t \times 1/T$, Fig. 1). The Parallel Arrhenius (PA, Eq. (PA1)) and Fanning Arrhenius (FA, Eq. (FA1)) equations (Laslett et al., 1987), the Parallel Curvilinear (PC, Eq. (PC1)) and Fanning Curvilinear (FC, Eq. (FC1)) models (Crowley et al., 1991), as well as the Carlson Model (CM, Eq. (CM1)), which mixes the Parallel Arrhenius and Parallel Curvilinear models in the same equation (Carlson, 1990), are used in this analysis. The transformation function $g(r) = \ln(1-r)$ was chosen because it carries no fitting parameters and was shown to produce good fits to annealing data (Guedes et al., 2022). In addition, it arises naturally from the physicochemical formulation of the fission track annealing (Rufino and Guedes, 2022), as will be shown below. More comprehensive descriptions of the annealing models can be found elsewhere (Carlson, 1990; Ketcham, 2019; Guedes et al., 2022).
Figure 1: Representation of the Arrhenius fission-track annealing models in the pseudo-Arrhenius plot. (a) Fanning Arrhenius, Parallel Arrhenius, and Carlson models. (b) Fanning Curvilinear and Parallel Curvilinear models. Laboratory annealing data are c-axis projected reduced fission-track lengths from Durango apatite (Carlson et al., 1999). Data from the geological benchmark KTB (Wauschkuhn et al., 2015) are included only for reference. The models are represented as isoretention curves. Points on these curves are the temperature and time of constant temperature heating resulting in the same reduced length.
The annealing data set on the c-axis projected fission tracks of Durango apatite (Carlson et al., 1999) was used for model fitting. Durango apatite annealing data were chosen because Durango is a well-known standard sample often used in methodological studies (Green et al., 1986; Carlson, 1990; Ketcham et al., 1999, 2007; Rana et al., 2021; Guedes et al., 2022; Rufino and Guedes, 2022). The fitting parameters for the PA, PC, FA and FC models are the same as presented in Rufino and Guedes (2022). They were numerically determined using the function nlsLM of the package minpack.lm (Elzhov et al., 2016), written in the R language, which applies the Levenberg-Marquardt algorithm to minimize the residual sum of squares (RSS), using the squared inverse of the $r$ uncertainties as weights. With the same method, fitting parameters were also obtained for the CM model. The fitting parameters are presented in the last column of Table 1.
[Figure 1 appears here as two image panels, (a) and (b). Both plot ln(time), time in hours, against 1000/T (K⁻¹), showing the length-reduction experimental data binned by r (from r > 0.9 down to r ≤ 0.6) together with the model isoretention curves for r = 0.55 and r = 0.9: panel (a) Parallel Arrhenius, Carlson Model and Fanning Arrhenius; panel (b) Parallel Curvilinear and Fanning Curvilinear.]
Table 1
Effective reaction rate constant, $r$ reduction and equivalent time equations associated with the fission-track annealing models.

Parallel Arrhenius (PA):
$$f_{PA}(t,T) = c_0 + c_1 \ln(t) + \frac{c_2}{RT} \qquad (PA1)$$
$$k_{ef}(T)_{PA} = c_1 e^{c_0/c_1} \exp\left(\frac{c_2/c_1}{RT}\right) \qquad (PA2)$$
$$E_a^{PA} = -\frac{c_2}{c_1} \qquad (PA3)$$
Parameters (standard error): $c_0$ = 5.631 (0.220); $c_1$ = 0.1865 (0.0066); $c_2$ = -10.46 (0.31) kcal/mol; $\chi^2_\nu$ = 2.65.

Parallel Curvilinear (PC):
$$f_{PC}(t,T) = c_0 + c_1 \ln(t) + c_2 \ln\left(\frac{1}{RT}\right) \qquad (PC1)$$
$$k_{ef}(T)_{PC} = c_1 e^{c_0/c_1} (RT)^{-c_2/c_1} \qquad (PC2)$$
$$E_a^{PC}(T) = -\frac{c_2}{c_1}\, RT \qquad (PC3)$$
Parameters (standard error): $c_0$ = -4.910 (0.096); $c_1$ = 0.1944 (0.0060); $c_2$ = -9.610 (0.244); $\chi^2_\nu$ = 2.12.

Carlson Model (CM):
$$f_{CM}(t,T) = c_0 + c_1 \ln(t) + c_1 \ln(RT) + \frac{c_2}{RT} \qquad (CM1)$$
$$k_{ef}(T)_{CM} = c_1 e^{c_0/c_1} \exp\left(\frac{c_2/c_1}{RT}\right) RT \qquad (CM2)$$
$$E_a^{CM}(T) = -\frac{c_2}{c_1} + RT \qquad (CM3)$$
Parameters (standard error): $c_0$ = 5.426 (0.2155); $c_1$ = 0.1867 (0.0066); $c_2$ = -10.25 (0.2994) kcal/mol; $\chi^2_\nu$ = 2.63.

Fanning Arrhenius (FA):
$$f_{FA}(t,T) = c_0 + c_1\,\frac{\ln(t) - c_2}{\dfrac{1}{RT} - c_3} \qquad (FA1)$$
$$k_{ef}(t,T)_{FA} = \frac{c_1 \exp\left[(1-n)\, f_{FA}(t,T)\right]}{t\left(\dfrac{1}{RT} - c_3\right)} \qquad (FA2)$$
$$E_a^{FA}(t,T) = \frac{(RT)^2\left[c_1(n-1)(c_2 - \ln t) - c_3 + \dfrac{1}{RT}\right]}{(c_3 RT - 1)^2} \qquad (FA3)$$
Parameters (standard error): $c_0$ = -8.518 (1.072); $c_1$ = 0.1266 (0.0191) mol/kcal; $c_2$ = -20.99 (5.81); $c_3$ = 0.2985 (0.1026) mol/kcal; $\chi^2_\nu$ = 1.66; $0.5 \leq n < 1$.

Fanning Curvilinear (FC):
$$f_{FC}(t,T) = c_0 + c_1\,\frac{\ln(t) - c_2}{\ln\left(\dfrac{1}{RT}\right) - c_3} \qquad (FC1)$$
$$k_{ef}(t,T)_{FC} = \frac{c_1 \exp\left[(1-n)\, f_{FC}(t,T)\right]}{t\left(\ln\left(\dfrac{1}{RT}\right) - c_3\right)} \qquad (FC2)$$
$$E_a^{FC}(t,T) = \frac{RT\left(c_1 c_2 n - c_1 c_2 + (c_1 - c_1 n)\ln(t) - c_3 + \ln\left(\dfrac{1}{RT}\right)\right)}{\left(c_3 - \ln\left(\dfrac{1}{RT}\right)\right)^2} \qquad (FC3)$$
Parameters (standard error): $c_0$ = -9.449 (1.480); $c_1$ = 0.1627 (0.0298); $c_2$ = -24.58 (7.75); $c_3$ = -0.8626 (0.1549); $\chi^2_\nu$ = 1.88; $0.5 \leq n < 1$.

Notes: 1. For each fission-track annealing model (Eqs. (PA1), (PC1), (CM1), (FA1), (FC1)), the reaction rate constants $k_{ef}$ and Arrhenius activation energies were obtained using $g(r) = \ln(1-r)$ and $f_r(r) = (1-r)^n$. 2. Rate constants (Eqs. (PA2), (PC2), (CM2), (FA2), (FC2)) were calculated after Eq. (15). 3. Arrhenius activation energies (Eqs. (PA3), (PC3), (CM3), (FA3), (FC3)) were obtained by the application of Eq. (3), and are average values for constant heating experiments.
Fission tracks are formed by displaced atoms and vacant sites, in concentrations high enough to change the structure of the mineral in a volume of about 2-10 nm in diameter and around 20 μm in length. The annealing process is the recombination of defects and vacancies, which also changes the neighboring structure and consequently the recombination rate. This kind of solid-state reaction is described by the conversion rate equation (Vyazovkin, 2015):
$$\frac{d\alpha}{dt} = k(T)\, f_\alpha(\alpha). \qquad (5)$$

Eq. (5) relates the rate of conversion of the reactant $\alpha$ to the rate constant and to the reaction function $f_\alpha(\alpha)$. For the fission tracks, $\alpha$ is the concentration of recombined atoms, and $f_\alpha(\alpha)$ describes how the recombination process changes the surrounding structure. The track length can be used as a proxy for the concentration of displaced atoms (Rufino and Guedes, 2022) and:
$$\alpha = \frac{L_0 - L}{L_0} = 1 - r \qquad (6)$$
and with this change of variable:
$$\frac{dr}{dt} = -k_{ef}(t,T)\, f_r(r). \qquad (7)$$

The rate constant has been replaced with an effective rate constant, $k_{ef}(t,T)$, which may depend on time and temperature and is suitable for describing more complex reactions (Vyazovkin, 2016). For the reaction function, the reaction-order function has already been shown to produce consistent results, mainly for the single activation energy mechanisms of annealing (Green et al., 1988; Rufino and Guedes, 2022):
$$f_r = (1-r)^n \qquad (8)$$
in which $n$ is the reaction order.

Eq. (7) is a differential equation that can be solved by separation of variables. To define the limits of the integral, consider that at the beginning of the thermal history ($t = 0$) the track is unannealed ($r = 1$). After a heating duration $t$, the track length has been shortened to $r$. Then:
$$\int_1^r \frac{dr'}{f_r(r')} = \int_0^t -k_{ef}(t',T)\, dt'. \qquad (9)$$
Eq. (9) is the basic equation from which the annealing kinetics can be studied from a physicochemical perspective. Once the reaction function and the rate constant are chosen, the dependence of the reduced fission-track length can be calculated over any T-t path. Let us start with the known case of constant temperature annealing, from which the annealing equations should be obtained. For the single activation energy models, PA, PC, and CM, the rate constants are given by:
$$k_{ef}(T)_{PA} = A_1 \exp\left(\frac{-Q_1}{RT}\right), \qquad (10a)$$
$$k_{ef}(T)_{PC} = A_2 (RT)^m, \qquad (10b)$$
$$k_{ef}(T)_{CM} = A_3 (RT) \exp\left(-\frac{Q_3}{RT}\right), \qquad (10c)$$
where $A_i$, $Q_i$, and $m$ are constants. Eq. (10a) is the original Arrhenius equation from which the PA equation is derived. $Q_1$ can be directly identified with the activation energy only in this case. Eq. (10b) generates the PC equation with a temperature-dependent activation energy (Table 1, Eq. (PC3)). Eq. (10c) generates the Carlson Model, also with a temperature-dependent activation energy (Table 1, Eq. (CM3)). It is the product of Eqs. (10a) and (10b), with $m = 1$, and was proposed soon after the original Arrhenius equation to deal with reactions that deviate from the expected Arrhenius behavior (Kooij, 1893). Note that although the activation energies in Eqs. (10b) and (10c) depend on temperature, they still fall into the category of single activation energy processes, meaning that all recombination events at a given temperature have the same activation energy.
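As an illustration (ours, with units as read from Table 1: time in hours, energies in kcal/mol), these single activation energy rate constants can be evaluated directly from the fitted parameters through Eqs. (PA2), (PC2), and (CM2):

```python
# Evaluate the single activation energy rate constants of Table 1
# (Eqs. (PA2), (PC2), (CM2)) from the fitted parameters; a sketch.
import numpy as np

R = 1.98720425e-3   # kcal mol^-1 K^-1

def k_PA(T, c0=5.631, c1=0.1865, c2=-10.46):
    return c1*np.exp(c0/c1)*np.exp((c2/c1)/(R*T))          # Eq. (PA2)

def k_PC(T, c0=-4.910, c1=0.1944, c2=-9.610):
    return c1*np.exp(c0/c1)*(R*T)**(-c2/c1)                # Eq. (PC2)

def k_CM(T, c0=5.426, c1=0.1867, c2=-10.25):
    return c1*np.exp(c0/c1)*np.exp((c2/c1)/(R*T))*(R*T)    # Eq. (CM2)

for T in (400.0, 500.0, 600.0):                            # kelvin
    print(f"T = {T:.0f} K: k_PA = {k_PA(T):.3e}, "
          f"k_PC = {k_PC(T):.3e}, k_CM = {k_CM(T):.3e}")
```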
Annealing experiments are isothermal heating procedures. Then, substituting the effective rate constants (Eqs. (10)) into the integral equation (Eq. (9)) together with the reaction-order function defined in Eq. (8), and solving it considering the temperature as a constant, results in:
$$\ln(1-r) = \frac{\ln\left[A_1(1-n)\right]}{1-n} + \frac{1}{1-n}\ln(t) - \frac{Q_1}{1-n}\,\frac{1}{RT}, \qquad (11a)$$
$$\ln(1-r) = \frac{\ln\left[A_2(1-n)\right]}{1-n} + \frac{1}{1-n}\ln(t) - \frac{m}{1-n}\ln\left(\frac{1}{RT}\right), \qquad (11b)$$
$$\ln(1-r) = \frac{\ln\left[A_3(1-n)\right]}{1-n} + \frac{1}{1-n}\ln(t) - \frac{Q_3}{1-n}\,\frac{1}{RT} - \frac{1}{1-n}\ln\left(\frac{1}{RT}\right), \qquad (11c)$$
which are the equations for the PA (11a), PC (11b), and CM (11c) models with $g(r) = \ln(1-r)$. For the chosen reaction function, the integral only has a real solution if $n < 1$. Comparing the right-hand sides of these equations respectively with Eqs. (PA1), (PC1), and (CM1), one finds that the rate constant parameters are related to the fitting parameters of the annealing equations as
$$\text{PA}: \quad n = \frac{c_1 - 1}{c_1}, \qquad Q_1 = -\frac{c_2}{c_1}, \qquad A_1 = c_1 \exp(c_0/c_1) \qquad (12)$$
$$\text{PC}: \quad n = \frac{c_1 - 1}{c_1}, \qquad m = -\frac{c_2}{c_1}, \qquad A_2 = c_1 \exp(c_0/c_1) \qquad (13)$$
$$\text{CM}: \quad n = \frac{c_1 - 1}{c_1}, \qquad Q_3 = -\frac{c_2}{c_1}, \qquad A_3 = c_1 \exp(c_0/c_1) \qquad (14)$$
In this way, the rate constants can be expressed in terms of the fitting parameters of the annealing models, as shown in Eqs. (PA2), (PC2), and (CM2) of Table 1. The values of the reaction order $n$ for the three models are $n \approx -4$, in agreement with a similar analysis carried out by Green et al. (1988) for the PA model. Therefore, the parallel models are not compatible with first-order annealing kinetics, meaning that the neighboring structure has a strong influence on the rate of defect recombination during annealing.
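The identification in Eqs. (12)-(14) is immediate to evaluate. The sketch below (ours) reads off $n$, the energy-like constant, and the pre-exponential factor from the fitted constants of Table 1; note that for PC the same combination yields the exponent $m$ rather than an energy.

```python
# Kinetic parameters implied by Eqs. (12)-(14) from the fitted constants
# of Table 1 (a sketch; Q in kcal/mol for PA and CM, dimensionless m for PC).
from math import exp

def kinetics(c0, c1, c2):
    n = (c1 - 1.0)/c1        # reaction order
    Q = -c2/c1               # Q_1, m, or Q_3 depending on the model
    A = c1*exp(c0/c1)        # pre-exponential factor
    return n, Q, A

for name, c in [('PA', (5.631, 0.1865, -10.46)),
                ('PC', (-4.910, 0.1944, -9.610)),
                ('CM', (5.426, 0.1867, -10.25))]:
    n, Q, A = kinetics(*c)
    print(f"{name}: n = {n:.2f}, Q (or m) = {Q:.1f}, A = {A:.3e}")
```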
There are no obvious expressions for the rate constant of the fanning models. A physicochemical analysis of their trends indicates that multiple concurrent processes with different activation energies occur during the annealing of the fission tracks (Rufino and Guedes, 2022), in agreement with previous suggestions (Green et al., 1988; Tamer and Ketcham, 2020). Rufino and Guedes (2022) derived an expression from Eq. (7) to find the effective rate constant from the annealing model:
$$k_{ef}(t,T) = -\frac{1}{f_r(r)}\left[\frac{\partial g(r)}{\partial r}\right]^{-1} \left.\frac{\partial f(t,T)}{\partial t}\right|_T \qquad (15)$$
Eq. (15) provides a direct way to calculate this effective reaction rate constant from the model functions that fit the experimental annealing data, $f(t,T)$ and $g(r)$, and from the reaction function $f_r(r)$. The partial derivative with respect to time is taken because the annealing models were designed to describe constant temperature experiments. As a check, before applying Eq. (15) to the fanning models, one can show that Eqs. (PA2), (PC2), and (CM2) are found by the application of Eq. (15) respectively to Eqs. (PA1), (PC1), and (CM1), with $g(r) = \ln(1-r)$ and $f_r(r) = (1-r)^n$.
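This check is easy to automate; the sketch below (ours, not from the paper) performs it symbolically for the PA model with sympy.

```python
# Symbolic check that Eq. (15) applied to (PA1) reproduces (PA2).
import sympy as sp

t, T, R, c0, c1, c2 = sp.symbols('t T R c0 c1 c2', positive=True)
n = (c1 - 1)/c1                        # reaction order from Eq. (12)
f_PA = c0 + c1*sp.log(t) + c2/(R*T)    # Eq. (PA1), with g(r) = ln(1 - r)
# With g(r) = ln(1 - r) and f_r(r) = (1 - r)^n, Eq. (15) reduces to
# k_ef = (1 - r)^(1 - n) * df/dt = exp((1 - n) f) * df/dt:
k_ef = sp.exp((1 - n)*f_PA)*sp.diff(f_PA, t)
target = c1*sp.exp(c0/c1)*sp.exp((c2/c1)/(R*T))   # Eq. (PA2)
assert sp.simplify(k_ef/target) == 1
print("Eq. (15) applied to (PA1) reproduces (PA2)")
```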
The same procedure can be applied to Eqs. (FA1) and (FC1) to find the effective reaction rates for the Fanning Arrhenius (Eq. (FA2)) and Fanning Curvilinear (Eq. (FC2)) models, respectively. An alternative way to infer the effective rate constants for the FA and FC models is to depart from the hypothesis that the Arrhenius activation energies and, therefore, the rate constants depend on the fission-track reduced length. Then, integration of Eq. (9) under the isothermal condition results in
$$-\int_1^r \frac{dr'}{f_r(r')\, k_{ef}(r')} = \int_0^t dt' = t. \qquad (16)$$
It can be shown that the primitive functions that make Eq. (16) true for the FA and FC models, with $f_r(r) = (1-r)^n$ and $g(r) = \ln(1-r)$, are the ones with the effective rate constants given by Eqs. (FA2) and (FC2). This approach also illustrates how the incorporation of time into the rate constant and, therefore, into the activation energies of the fanning models is implied by the dependence of the activation energies on the values of $r$.
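Once $k_{ef}(t,T)$ is in hand, Eq. (7) can be integrated over a variable temperature path without invoking the PET, as the reaction-order derivation below will make precise. The sketch that follows (our illustration, not the authors' implementation) does this for the FC model, taking Eq. (FC2) at face value along the path: with $w = (1-r)^{1-n}$, Eq. (7) and $f_r = (1-r)^n$ give $dw/dt = (1-n)\, k_{ef}(t, T(t))$, so $r$ follows from a single quadrature. Units are assumed to be hours for $t$ and kcal mol⁻¹ K⁻¹ for $R$, with a made-up linear cooling path.

```python
# Reduced length r after a variable temperature history, FC model (Eq. (FC2)).
import numpy as np
from scipy.integrate import quad

R = 1.98720425e-3                                  # kcal mol^-1 K^-1
c0, c1, c2, c3 = -9.449, 0.1627, -24.58, -0.8626   # FC fit, Table 1
n = 0.5                                            # smallest order allowed by Eq. (19)

def f_FC(t, T):                                    # Eq. (FC1)
    return c0 + c1*(np.log(t) - c2)/(np.log(1.0/(R*T)) - c3)

def k_ef(t, T):                                    # Eq. (FC2)
    return c1*np.exp((1 - n)*f_FC(t, T))/(t*(np.log(1.0/(R*T)) - c3))

def T_path(t):                                     # hypothetical cooling path
    return 523.15 - 1.0e-4*t                       # kelvin, t in hours

def r_after(t_end):
    w, _ = quad(lambda u: (1 - n)*k_ef(u, T_path(u)), 0.0, t_end, limit=200)
    return 1.0 - w**(1.0/(1.0 - n))                # invert w = (1-r)^(1-n)

for hours in (1e2, 1e4, 1e6):
    print(f"t = {hours:.0e} h -> r = {r_after(hours):.4f}")
```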
+ To obtain the reaction order 푛 for the FA and FC models, the effective reaction rate constants given by Eqs. (FA2)
524
+ and (FC2) are integrated with Eq. (9) considering constant temperatures (isothermal experiments):
525
+
526
+
527
+ 1
528
+ d푟
529
+ (1 − 푟)푛 = − ∫
530
+
531
+ 0
532
+ 푐1
533
+ 1
534
+ 푅푇 − 푐3
535
+ 1
536
+ 푡 exp
537
+
538
+
539
+ ⎢⎣
540
+ −(푛 − 1)
541
+
542
+
543
+ ⎜⎝
544
+ 푐0 + 푐1
545
+ ln 푡 − 푐2
546
+ 1
547
+ 푅푇 − 푐3
548
+
549
+
550
+ ⎟⎠
551
+
552
+
553
+ ⎥⎦
554
+ d푡
555
+ (17a)
556
+
557
+
558
+ 1
559
+ d푟
560
+ (1 − 푟)푛 = − ∫
561
+
562
+ 0
563
+ 푐1
564
+ ln
565
+ (
566
+ 1
567
+ 푅푇
568
+ )
569
+ − 푐3
570
+ 1
571
+ 푡 exp
572
+
573
+
574
+
575
+ ⎢⎣
576
+ −(푛 − 1)
577
+
578
+
579
+
580
+ ⎜⎝
581
+ 푐0 + 푐1
582
+ ln 푡 − 푐2
583
+ ln
584
+ (
585
+ 1
586
+ 푅푇
587
+ )
588
+ − 푐3
589
+
590
+
591
+
592
+ ⎟⎠
593
+
594
+
595
+
596
+ ⎥⎦
597
+ d푡
598
+ (17b)
With the necessary condition of n < 1, the solution of the integral equation (17a) is

\[ (1 - r) = (-1)^{-1/(n-1)} \exp\!\left[ c_0 + c_1\,\frac{\ln(t) - c_2}{\frac{1}{RT} - c_3} \right] \tag{18} \]
As the solution of this equation is to represent the shortening of the fission tracks, (1 - r) must be a real value between 0 and 1, which is true only if -1/(n - 1) is an even, positive integer 2j. Then, the values of n are restricted to

\[ n = \frac{2j - 1}{2j}, \tag{19} \]
where j = 1, 2, 3, .... With this condition and g(r) = ln(1 - r), the FA model (Table 1, Eq. (FA1)) is recovered. The solutions for Eqs. (17a) and (17b) are similar, differing only in the logarithm of 1/RT for the FC model instead of the 1/RT for the FA model, which are both constants in this case. The previous analysis therefore also holds for the FC model. The values of n will be fractional for FA and FC (n = 1/2, 3/4, 5/6, 7/8, ...), according to Eq. (19). Fractional reaction orders are characteristic of multiple-step reactions or of some more complex kinetic mechanism, as has been explained for the decomposition of acetaldehyde (Laidler et al., 1965), a well-known example of a fractional reaction order in chemistry. However, for the fission tracks, where the displaced atoms and vacant sites take the role of reactants and the deformed track structure is the reaction medium, explanations of the kinetics of a single reactant via a mean-field approximation (MFA) may not be appropriate (Córdoba-Torres et al., 2003). Thus, for the effective reaction rate constant k_ef of the fanning annealing models, mechanistic modeling considering the intermediate steps, i.e., recognizing the reaction order of each mechanism involved in annealing, would be desirable to elucidate the meaning of the fractional reaction order found (Koga et al., 1992). The rate constants for FA and FC are to be viewed as effective equations that constrain the general behavior of annealing but do not allow the description of the specifics of the annealing kinetics.
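For concreteness, the first few reaction orders allowed by Eq. (19) can be enumerated directly (a trivial sketch):

# Allowed fractional reaction orders for the fanning models (Eq. 19)
allowed_n = [(2*j - 1) / (2*j) for j in range(1, 5)]
print(allowed_n)  # [0.5, 0.75, 0.8333..., 0.875]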
In this physicochemical framework of fission-track annealing, the effective reaction rate constant, k_ef(t, T), and the reaction function, f_r(r), are the fundamental building blocks from which the fission-track annealing kinetics can be studied. The application to constant-temperature annealing made it possible to determine the rate constant parameters from the empirically determined parameters of the annealing equations. The calculation of the Arrhenius activation energies (E_a) for the different models becomes possible through Eq. (3). The Arrhenius activation energies of the parallel annealing models (Eqs. PA3, PC3 and CM3) will be constants with respect to the variable t. As for the fanning equations, E_a will vary with time and temperature (Eqs. FA3 and FC3). However, the main advantage of this approach is the possibility of calculating the fission-track length reduction over any T-t path using Eq. (9), without resorting to the iterative application of the Principle of Equivalent Time.
2.2. Fission track annealing under variable temperature thermal histories

Fission-track thermal history inference is based on the Principle of Equivalent Time (PET) (Goswami et al., 1984; Duddy et al., 1988), which is an iterative method that allows the application of isothermal annealing models to variable temperature T-t paths. It is detailed in Appendix A. In general, a given variable temperature thermal history is divided into finite time intervals Δt_i, centered at times t_i and temperatures T_i. At the time interval in which the population was born, a first reduced length is calculated by applying the annealing equation, using the temperature of the T-t path and the duration of the interval (T_i, Δt_i). In the next interval, at a different temperature on the T-t path, the annealing model is used to find an equivalent time capable of producing the same length shortening as the previous interval but at the new temperature. A new length shortening is then calculated by applying the annealing model to the period of time that is the sum of the equivalent time and the length of the time interval. This procedure is repeated so that, at any given temperature T_i on the T-t path, an equivalent time τ_i, which reproduces the length shortening of the previous interval, r_{i-1}, is determined, and the new length shortening can be calculated as if the track had been at the same constant temperature from the beginning. The reduced length is updated (r_i) by calculating it as the result of heating at T_i for the duration τ_i + Δt_i. The hypothesis that the annealing kinetics does not depend on the previous thermal history of the track, but only on its current length, so that any previous T-t path can be replaced with a constant temperature heating resulting in this length, is the basis of this procedure and defines the Principle of Equivalent Time. This means, in practice, that the track retains no memory of the time and temperature conditions of its previous shortenings. The equations for the application of the PET with the PA, PC, CM, FA, and FC models can be found in Table A1 in Appendix A.
The physicochemical tool presented in the previous section provides an alternative way to access variable temperature annealing kinetics by solving the integral on the right side of Eq. (9) over a T-t path. Eq. (9) is solved as a line integral. A suitable parameterization is:

\[ s = \begin{cases} T = T(u) \\ t = u \end{cases}, \qquad \mathrm{d}s = \sqrt{1 + \left(\frac{\mathrm{d}T}{\mathrm{d}u}\right)^{2}}\;\mathrm{d}u. \tag{20} \]
Implementing the parameterized variables on the right side of the integral equation (Eq. 9),

\[ I = \int_{0}^{t} k_{ef}(t(u), T(u))\,\frac{\mathrm{d}s}{\sqrt{1 + \left(\frac{\mathrm{d}T}{\mathrm{d}u}\right)^{2}}} \;\Longrightarrow\; I = \int_{0}^{t} k_{ef}(t(u), T(u))\;\mathrm{d}u. \tag{21} \]
Solving the left side of Eq. (9) for the f_r(r) function given by Eq. (8) and the parameterized integral for the rate constant (Eq. (21)), the reduced length, after the track has experienced the thermal history given by the T-t path, is

\[ r = 1 - \left( (1 - n) \int_{0}^{t} k_{ef}(t(u), T(u))\;\mathrm{d}u \right)^{1/(1-n)}, \tag{22} \]
in which n < 1 as usual. At first glance, the advantage of the Rate Constant path Integral (RCI, Eq. (22)) is that it is a one-shot calculation of the reduced track length. In addition, there is no need to restrict the form of the rate constant function and, therefore, the annealing mechanism to which it is related.
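In computational terms, Eq. (22) reduces to a single quadrature. The sketch below is a minimal Python illustration under assumed inputs: the rate constant is a generic single-activation-energy Arrhenius form with placeholder values of A and Ea (not fitted parameters from this work), and time is measured in Ma:

import numpy as np
from scipy.integrate import quad

R = 1.98720e-3  # gas constant, kcal/(mol K)

def rci_reduced_length(k_ef, T_path, duration, n=0.5):
    # Rate Constant path Integral (Eq. 22): one-shot reduced length r for a
    # track experiencing the path T_path(u), 0 <= u <= duration; n < 1.
    I, _ = quad(lambda u: k_ef(u, T_path(u)), 0.0, duration)
    base = (1.0 - n) * I
    return 1.0 - base ** (1.0 / (1.0 - n)) if base < 1.0 else 0.0  # r = 0: erased

# Linear cooling ending at 20 C (293.15 K), as in Section 3:
A, Ea = 1.0e6, 40.0                 # placeholder pre-factor (1/Ma) and energy (kcal/mol)
k_ef = lambda t, T: A * np.exp(-Ea / (R * T))
Tdot, duration = 1.0, 100.0         # cooling rate (K/Ma) and path duration (Ma)
T0 = 293.15 + Tdot * duration       # start temperature
print(rci_reduced_length(k_ef, lambda u: T0 - Tdot * u, duration))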
3. Results and Discussion

The RCI (Eq. (22)) can be applied to calculate the shortening in the reduced length of a single fission-track population submitted to any T-t path. The same calculation can be carried out using the iterative technique based on the Principle of Equivalent Time (PET). To compare the outcomes of the two methods, both calculations will be performed for the parallel (including CM) and fanning models. The case will be made for linear cooling, T(t) = T_0 - \dot{T} t, where \dot{T} is the cooling rate and T_0 is the temperature at the time the track was generated. The temperature at the end of the T-t path (present time) was fixed at 20 °C (293.15 K) for this analysis.
3.1. Parallel models

To solve the RCI, the effective rate constant functions for the parallel models (Table 1, Eqs. (PA2), (PC2), and (CM2)) are inserted in Eq. (22) with the variable T replaced with T(t) = T_0 - \dot{T} t wherever it appears. The analytical solutions for the reduced length shortening calculated for the PA, PC, and CM are
\[ r_{PA} = 1 - \left( \frac{ e^{c_0/c_1} \left[ c_2\,\mathrm{Ei}\!\left( \frac{c_2}{c_1 R (T_0 - \dot{T} t)} \right) - c_2\,\mathrm{Ei}\!\left( \frac{c_2}{c_1 R T_0} \right) + c_1 R \left( (\dot{T} t - T_0)\, e^{\frac{c_2}{c_1 R (T_0 - \dot{T} t)}} + T_0\, e^{\frac{c_2}{c_1 R T_0}} \right) \right] }{ c_1 \dot{T} R } \right)^{c_1}, \tag{23a} \]

\[ r_{PC} = 1 - \left( \frac{ c_1 e^{c_0/c_1} \left[ (\dot{T} t - T_0) \left( R (T_0 - \dot{T} t) \right)^{-c_2/c_1} + T_0 \left( R T_0 \right)^{-c_2/c_1} \right] }{ c_1 \dot{T} - c_2 \dot{T} } \right)^{c_1}, \tag{23b} \]

\[ r_{CM} = 1 - 2^{-c_1} e^{c_0} c_1^{-2c_1} (\dot{T} R)^{-c_1} \left[ c_1 R \left( T_0\, e^{\frac{c_2}{c_1 R T_0}} (c_1 R T_0 + c_2) - (T_0 - \dot{T} t)\, e^{\frac{c_2}{c_1 R T_0 - c_1 \dot{T} R t}} \left( c_1 R (T_0 - \dot{T} t) + c_2 \right) \right) + c_2^{2} \left( \mathrm{Ei}\!\left( \frac{c_2}{c_1 R T_0 - c_1 R t \dot{T}} \right) - \mathrm{Ei}\!\left( \frac{c_2}{c_1 R T_0} \right) \right) \right]^{c_1}, \tag{23c} \]
where Ei is the exponential integral function. Eqs. (23a)-(23c) give the resulting reduced length r for the parallel models as functions of the three variables that characterize the thermal history: the duration of the T-t path (t), the cooling rate (\dot{T}), and the temperature at the time when the track was born (T_0). The parameters c_i are given in the last column of Table 1. The values of r for the cooling path with the cooling rate \dot{T} = 1.0 °C/Ma calculated with the three parallel models are presented in Fig. 2. For each point, the value of r is the length reduction after a linear cooling of duration t, as measured in the present. Values of r = 0 mean that the tracks have been erased before the present. Values obtained by the RCI solutions (Eqs. (23a)-(23c)) are represented as red curves marked with red circles and the values calculated using the PET are represented as blue curves marked with blue squares. RCI and PET calculations produce very close values of r for the three parallel models (Figs. 2a, 2b, 2c).
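As a numerical cross-check of Eq. (23a), the expression can be evaluated directly with scipy's implementation of Ei. The PA parameters below are those listed in Table A1; the time unit is whatever unit the c_i were fitted in, so the numbers are purely illustrative:

import numpy as np
from scipy.special import expi  # exponential integral Ei

R = 1.98720e-3                      # gas constant, kcal/(mol K)
c0, c1, c2 = 5.631, 0.1865, -10.46  # PA parameters (Table A1); c2 in kcal/mol

def r_pa_linear_cooling(t, Tdot, T0):
    # Analytical RCI solution (Eq. 23a) for linear cooling T(t) = T0 - Tdot*t
    Tt = T0 - Tdot * t
    term = (c2 * expi(c2 / (c1 * R * Tt)) - c2 * expi(c2 / (c1 * R * T0))
            + c1 * R * ((Tdot * t - T0) * np.exp(c2 / (c1 * R * Tt))
                        + T0 * np.exp(c2 / (c1 * R * T0))))
    inner = np.exp(c0 / c1) * term / (c1 * Tdot * R)
    return 1.0 - inner ** c1 if inner > 0 else 1.0

print(r_pa_linear_cooling(t=100.0, Tdot=1.0, T0=393.15))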
The temperature indexes Closure Temperature (T_C) and Total Annealing Temperature (T_A) were also calculated for the three parallel models, applying both methods of calculation, for cooling T-t paths with cooling rates of 1, 10, and 100 °C/Ma. T_C is, for a monotonic cooling thermal history, the temperature at the apparent sample age (Dodson, 1973). T_A is the temperature at the age of the oldest track that has not been erased and can still be counted in the sample (Issler, 1996). Details on the method of calculation of the index temperatures can be found in Guedes et al. (2013). T_C and T_A are meaningful quantities that allow quantifying the impact of using the RCI instead of the iterative PET calculation. The uncertainties in T_C and in T_A were estimated by simple error propagation of the apparent (T_C) or retention (T_A) ages and the present-time temperature. Results are shown in Table 2. Setting the PET results as the reference values, given that PET is the method established in the literature, a relative error analysis can be carried out to verify the internal consistency between PET and RCI calculations. The relative error between PA PET and PA RCI is on average 0.69% for T_C and 1.19% for T_A. The same trend is found for calculations of T_C and T_A with PC and CM: 0.66% and 0.58% (PC) and 0.7% and 1.19% (CM). All errors are well inside the estimated uncertainties for the temperature index values and are most probably artifacts of the PET numerical calculation.
Figure 2: Values of the reduced track lengths (r), after a linear cooling (\dot{T} = 1.0 °C/Ma) starting at time t and ending at the present time at a fixed temperature of 20 °C, calculated using the parallel models: (a) Arrhenius, (b) Curvilinear, and (c) Carlson. The points that form the curve in red (circle marks) were calculated by applying the RCI (Eqs. 23a, 23b, and 23c). The values calculated using the PET are in blue (square marks).
Table 2
Comparison of thermal indexes for the models presented in this work, calculated using the PET and the RCI

Principle of Equivalent Time (PET) thermal indexes
Fit   T_C1     T_C10    T_C100   T_A1     T_A10    T_A100
PA    130(7)   144(7)   158(8)   153(8)   168(8)   184(9)
PC    105(6)   121(6)   139(7)   132(8)   151(8)   171(9)
CM    130(7)   143(7)   157(8)   153(8)   168(8)   184(9)
FA    134(7)   146(7)   160(8)   163(8)   176(8)   191(10)
FC    111(6)   126(6)   143(7)   143(7)   160(8)   179(9)

Rate constant integral on a path (RCI) thermal indexes
Fit   T_C1     T_C10    T_C100   T_A1     T_A10    T_A100
PA    131(7)   145(7)   159(8)   155(8)   170(8)   186(9)
PC    106(6)   122(6)   140(7)   133(8)   152(7)   172(9)
CM    131(7)   144(7)   158(8)   155(8)   170(8)   186(9)
FA    125(7)   137(7)   150(7)   153(9)   166(8)   181(10)
FC    100(6)   115(6)   130(6)   130(8)   148(8)   165(8)

Notes: 1. Temperatures in °C. 2. The standard errors are given in parentheses. 3. T_C: Closure Temperature. 4. T_A: Total Annealing Temperature. 5. The numbers to the right of T_C and T_A are the cooling rates in °C/Ma.
The PET was formulated under the hypothesis that the annealing of fission tracks is a single activation energy process (Duddy et al., 1988). The internal consistency between PET and RCI values of r, T_C, and T_A calculated with the parallel models is a check of the robustness of the physicochemical approach for dealing with variable temperature thermal histories. It is to be noted that not only the PA model, in which the activation energy is temperature-independent (Table 1, Eq. (PA3)), but also the PC model, in which the activation energy is temperature-dependent (Table 1, Eq. (PC3)), shows such internal consistency. The same agreement is observed for CM. The CM activation energy may vary with temperature but, with the parameters shown in Table 1, its Arrhenius activation energy is approximately constant, since the value of c_2/c_1 (54.9 kcal/mol) is much higher than typical values of RT (< 1.0 kcal/mol). Although the activation energies may vary with temperature, these models imply that at any given temperature the recombination events are taking place with the same activation energy. This is a sufficient condition for the applicability of the PET.
3.2. Fanning models
The values of r, T_C, and T_A for the cooling T-t path were also calculated for the fanning models, using both the iterative PET and the RCI methods. However, the RCI (Eq. (22)) could not be solved analytically for the FA and FC rate constants (Table 1, Eqs. (FA2) and (FC2)). The integrals were then solved numerically with the Wolfram Mathematica software (Wolfram Research, Inc., 2021). For validation, the integrals for the parallel models were also solved numerically, resulting in exactly the same values obtained with the analytical solutions (Eqs. (23a)-(23c)). Another feature to be considered is that only certain fractional values are allowed for the reaction order n, given by Eq. (19). The analysis will be limited to n = 0.5, n = 0.75 and n = 0.9. The numerical method breaks down when n > 0.95, although the mathematical upper bound is n < 1. The reduced length calculation results are shown in Fig. 3. Values calculated with the PET are shown in blue, with triangle marks, while values found by solving the RCI are shown in red (n = 0.5), purple (n = 0.75), and light purple (n = 0.9), respectively with circle, square, and diamond marks. The RCI r curves are very close to each other but depart from the r values calculated with the PET. Significant differences between RCI and PET r values are observed for the FC (Fig. 3b) and for the FA (Fig. 3a) models.
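The numerical solution is straightforward to reproduce outside Mathematica as well. The sketch below assumes the FA effective rate constant read off the integrand of Eq. (17a), with the FA parameters of Table A1 and n = 0.5; the time unit follows the unit of the original fit, and the small lower cutoff guards the integrable 1/t singularity at u = 0:

import numpy as np
from scipy.integrate import quad

R = 1.98720e-3                                   # kcal/(mol K)
c0, c1, c2, c3 = -8.518, 0.1266, -20.99, 0.2985  # FA parameters (Table A1)
n = 0.5                                          # allowed by Eq. (19), j = 1

def k_ef_fa(t, T):
    # Effective FA rate constant, as in the integrand of Eq. (17a)
    x = 1.0 / (R * T) - c3
    return (c1 / (x * t)) * np.exp(-(n - 1.0) * (c0 + c1 * (np.log(t) - c2) / x))

def r_fa(duration, Tdot, T_end=293.15, eps=1e-8):
    # RCI (Eq. 22) along the linear cooling T(u) = T0 - Tdot*u
    T0 = T_end + Tdot * duration
    I, _ = quad(lambda u: k_ef_fa(u, T0 - Tdot * u), eps, duration)
    base = (1.0 - n) * I
    return 1.0 - base ** (1.0 / (1.0 - n)) if base < 1.0 else 0.0

print(r_fa(duration=100.0, Tdot=1.0))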
Figure 3: Values of the reduced track lengths (r), after a linear cooling (\dot{T} = 1.0 °C/Ma) starting at time t and ending at the present time at a fixed temperature of 20 °C, calculated using the fanning models: (a) Arrhenius and (b) Curvilinear. The points that form the curves in red (circle marks), purple (square marks), and light purple (diamond marks) were calculated by applying the RCI, respectively with n = 0.5, n = 0.75, and n = 0.9. The values calculated using the PET are in blue.
The values of T_C and T_A for the fanning models, calculated using the PET and the RCI (n = 0.5), are in Table 2. For the FA model, the mean relative errors between the PET and the RCI T_C and T_A calculations are respectively 6.25% and 5.68%. For the FC model, the same comparisons result in still more significant differences: 9.57% (T_C) and 8.48% (T_A). The deviations between the values calculated using the PET and the RCI are much more significant than the ones found for the parallel model calculations. One major issue is that the fanning models do not fulfill the single activation energy hypothesis on which the PET is founded. The fanning models emerge from multiple concurring processes with different activation energies (Tamer and Ketcham, 2020; Rufino and Guedes, 2022). The effective Arrhenius activation energies incorporate a time dependence (Table 1, Eqs. (FA3) and (FC3)) that is the consequence of their dependence on the current reduced fission-track length (different slopes of the isoretention curves in the pseudo-Arrhenius plot). On the other hand, the rate constant integral (Eq. (22)) was obtained in a physicochemical framework developed to deal with chemical reactions that do not fit the single activation energy Arrhenius law (Vyazovkin, 2015). It is by design suitable for complex activation energy systems like the ones pictured by the fanning models. Note also that the figures presented are particular to the fitting parameters in Table 1. A different set of parameters would result in different values without changing the conclusion that RCI and PET predictions deviate from each other.
3.3. Implications for the thermal history modeling
The fanning models, especially the Fanning Curvilinear, have been shown to produce better fits to laboratory data and better geological extrapolation of annealing effects (Ketcham et al., 2007; Guedes et al., 2013). However, the application of the FC along with the PET is an approximation. Compared with the RCI formulation, it underestimates the annealing effect by about 10%, i.e., it predicts that higher temperatures are necessary for the same length shortening as calculated with the RCI for the tested cooling histories. In the context of the inverse problem of inferring T-t paths from the FT age and track length distribution of a mineral sample, this implies the requirement of a longer residence time in the partial annealing zone. For instance, compare, in Table 2, the FC T_A calculated with the RCI for a cooling rate of 10 °C/Ma (148 °C) with the FC T_A calculated with the PET for a cooling rate of 1 °C/Ma (143 °C). The same analysis applies to the FA model with less significant relative error figures (about 6%).

The parallel models (PA, PC, and CM), which can be safely applied along with the PET, have long been ruled out for FTT studies (Laslett et al., 1987; Guedes et al., 2013; Ketcham et al., 1999, 2007; Ketcham, 2019). Duddy et al. (1988) had argued that the FA deviated only slightly from the PA model and applied it along with the PET. The isoretention curves for the two models follow approximately the same trends (Fig. 1a). The same behavior is observed for the curvilinear models (Fig. 1b): FC and PC isoretention curves bend together towards lower temperatures. Their argument can be better appreciated in Fig. 4, where all the PET and RCI predictions for reduced lengths after the track underwent the cooling history are gathered in the same plot. Note that the linear models (PA and FA) and the approximately linear CM form a cluster, while the curvilinear models (PC and FC) form a separate set. The predictions with the fanning models and PET are closer to the predictions of the parallel models for track populations born when the sample passed through intermediate temperatures (partial annealing zone), which results in closer T_C values (compare values in Table 2). For populations born at higher temperatures, the fanning-PET predictions depart from the parallel model ones, resulting in a more significant difference between the calculated T_A values. Calculations with the RCI bring the fanning and parallel model predictions closer for populations born at higher temperatures. Within this approximation, it could be possible to engineer fanning model parameters to make the model even closer to a parallel model.
Figure 4: Values of the reduced track lengths (r), after a linear cooling (\dot{T} = 1.0 °C/Ma) starting at time t and ending at the present time at a fixed temperature of 20 °C, calculated for the parallel and fanning models. Calculations using the RCI are shown as solid symbols, while calculations using the PET are represented by open symbols.
4. Concluding remarks

Departing from Eq. (9), a physicochemical framework was built to deal with the effects of annealing in variable temperature T-t paths. The basic building blocks are the reaction function f_r(r) and the effective rate constant, k_ef(t, T). The parallel models (PA, PC, and CM) were shown to be consistent with the single activation energy rate constants given by Eqs. (10a)-(10c) and with the reaction-order function (Eq. (8)). The fanning models (FA and FC) are the representation of multiple concurrent recombination processes with different activation energies (Rufino and Guedes, 2022). The k_ef(t, T) functions were built (Eq. (15)) to be consistent with the reaction-order function (Eq. (8)). Obtaining the FA and FC rate constants from first principles, i.e., as a composition of rate constants for individual processes, and validating them experimentally is still an open issue that has to be dealt with. Eq. (22) is the line integral related to Eq. (9) from which length shortening due to cooling T-t paths can be directly calculated, independently of whether the rate constant represents a single or multiple activation energy mechanism.
The Principle of Equivalent Time, on the other hand, is only valid for single activation energy equations, which, for the fission-track system, are the parallel models. In these cases, the RCI-based calculations are in agreement with the PET ones (Fig. 2), indicating the robustness of the RCI formulation. For the fanning models, the use of the PET has long been recognized as an approximation (Duddy et al., 1988). Deviations have indeed been observed between the RCI and PET-based calculations (Fig. 3). Compared to the application of the RCI, the PET calculation underestimates annealing effects in variable temperature T-t paths (Table 2).

The PET along with the FA or FC models is the calculation method used to infer most published thermal histories. This procedure introduces a systematic deviation that should be considered in the geological interpretation of thermal history modeling. Alternatively, the rate constant integral (Eq. (22)) could be considered as a substitute for the PET in inversion thermal history codes (Ketcham, 2005; Gallagher, 2012). Computationally, solving an integral, even numerically, is faster than the iterative steps necessary to apply the PET. More importantly, if the rate constants are representative of the track annealing kinetics, this framework results, in principle, in more accurate predictions of the annealing effects in samples submitted to variable temperature thermal histories.
Appendix

A. Equations for the application of the Principle of Equivalent Time

The proposed method to deal with annealing in thermal histories with variable temperatures is also based on the Arrhenius equation and was first proposed by Goswami et al. (1984). The Principle of Equivalent Time (PET), on which this method is founded, states that the annealing rate of a track does not depend on its previous thermal history, but only on its current length.

The procedure is to infer the magnitude of annealing recursively, dividing the thermal history into appropriate intervals Δt_i and starting from a given value of r. For each step, annealing is carried out at constant temperature. The procedure will be shown for the Parallel Arrhenius model. In the first step, centered at (t_1, T_1), the reduced fission-track length is calculated using Eq. (PA1), along with g(r) = ln(1 - r):
\[ r_1 = 1 - (\Delta t_1)^{c_1} \exp\!\left( c_0 + \frac{c_2}{R T_1} \right) \tag{24} \]

For the next step, the calculation of r_2 incorporates the principle in the form that this new shortening is postulated to be independent of the previous one. Thus, one can find a length of time, τ_{r1}, that will yield the length r_1 but for heating at the temperature of the second interval, T_2:

\[ \tau_{r_1} = (1 - r_1)^{1/c_1} \exp\!\left[ -\frac{1}{c_1}\left( \frac{c_2}{R T_2} + c_0 \right) \right]. \tag{25} \]

The value of r_2 is then found by the application of the annealing model to the interval τ_{r_1} + Δt_2:

\[ r_2 = 1 - (\tau_{r_1} + \Delta t_2)^{c_1} \exp\!\left( c_0 + \frac{c_2}{R T_2} \right). \tag{26} \]
This procedure is iteratively repeated for the entire T-t path. The last value of r will be the reduced length of the population born in the first interval after experiencing the entire thermal history. For each interval j, the formulas above are:
\[ r_j = 1 - (\tau_{r_{j-1}} + \Delta t_j)^{c_1} \exp\!\left( c_0 + \frac{c_2}{R T_j} \right) \tag{27a} \]
Table A1
Equations for the application of the Principle of Equivalent Time

Parallel Arrhenius (PA):
  r_j = 1 - exp[ f_{PA}(Δt_j + τ_{r_{j-1}}, T_j) ]   (PA4)
  ln(τ_{r_{j-1}}) = ln(1 - r_{j-1})/c_1 - c_2/(c_1 R T_j) - c_0/c_1   (PA5)
  Parameters (standard error): c_0 = 5.631 (0.220), c_1 = 0.1865 (0.0066), c_2 = -10.46 (0.31) kcal/mol; χ²_ν = 2.65

Parallel Curvilinear (PC):
  r_j = 1 - exp[ f_{PC}(Δt_j + τ_{r_{j-1}}, T_j) ]   (PC4)
  ln(τ_{r_{j-1}}) = ln(1 - r_{j-1})/c_1 - (c_2/c_1) ln(1/(R T_j)) - c_0/c_1   (PC5)
  Parameters (standard error): c_0 = -4.910 (0.096), c_1 = 0.1944 (0.0060), c_2 = -9.610 (0.244); χ²_ν = 2.12

Carlson Model (CM):
  r_j = 1 - exp[ f_{CM}(Δt_j + τ_{r_{j-1}}, T_j) ]   (CM4)
  ln(τ_{r_{j-1}}) = ln(1 - r_{j-1})/c_1 - c_2/(c_1 R T_j) - c_0/c_1 - ln(R T_j)   (CM5)
  Parameters (standard error): c_0 = 5.426 (0.2155), c_1 = 0.1867 (0.0066), c_2 = -10.25 (0.2994) kcal/mol; χ²_ν = 2.63

Fanning Arrhenius (FA):
  r_j = 1 - exp[ f_{FA}(Δt_j + τ_{r_{j-1}}, T_j) ]   (FA4)
  ln(τ_{r_{j-1}}) = [ln(1 - r_{j-1}) - c_0][1/(R T_j) - c_3]/c_1 + c_2   (FA5)
  Parameters (standard error): c_0 = -8.518 (1.072), c_1 = 0.1266 (0.0191) mol/kcal, c_2 = -20.99 (5.81), c_3 = 0.2985 (0.1026) mol/kcal; χ²_ν = 1.66

Fanning Curvilinear (FC):
  r_j = 1 - exp[ f_{FC}(Δt_j + τ_{r_{j-1}}, T_j) ]   (FC4)
  ln(τ_{r_{j-1}}) = [ln(1 - r_{j-1}) - c_0][ln(1/(R T_j)) - c_3]/c_1 + c_2   (FC5)
  Parameters (standard error): c_0 = -9.449 (1.480), c_1 = 0.1627 (0.0298), c_2 = -24.58 (7.75), c_3 = -0.8626 (0.1549); χ²_ν = 1.88

Notes: 1. For each fission-track annealing model (Eqs. (PA1), (PC1), (CM1), (FA1), (FC1)), the length reduction r (Eqs. (PA4), (PC4), (CM4), (FA4), (FC4)) and the equivalent time τ (Eqs. (PA5), (PC5), (CM5), (FA5), (FC5)) were obtained using g(r) = ln(1 - r).
\[ \tau_{r_{j-1}} = (1 - r_{j-1})^{1/c_1} \exp\!\left[ -\frac{1}{c_1}\left( \frac{c_2}{R T_j} + c_0 \right) \right] \tag{27b} \]
The formulas for the PA, PC, CM, FA, and FC models are presented in Table A1.
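As a compact restatement of the recursion, the following minimal Python sketch applies Eqs. (24), (27a), and (27b) for the PA model; the path discretization and the final call are illustrative assumptions, with the PA parameters taken from Table A1 and the time unit following the unit of the original fit:

import numpy as np

R = 1.98720e-3                      # gas constant, kcal/(mol K)
c0, c1, c2 = 5.631, 0.1865, -10.46  # PA parameters (Table A1)

def pet_reduced_length(durations, temps):
    # Iterative PET calculation for the PA model: durations are the interval
    # lengths dt_i and temps the interval temperatures T_i (in K).
    r = 1.0 - durations[0] ** c1 * np.exp(c0 + c2 / (R * temps[0]))        # Eq. (24)
    for dt, T in zip(durations[1:], temps[1:]):
        if r <= 0.0:
            return 0.0                                                     # track erased
        tau = (1.0 - r) ** (1.0 / c1) * np.exp(-(c2 / (R * T) + c0) / c1)  # Eq. (27b)
        r = 1.0 - (tau + dt) ** c1 * np.exp(c0 + c2 / (R * T))             # Eq. (27a)
    return max(r, 0.0)

steps = 100
temps = np.linspace(393.15, 293.15, steps)   # linear cooling down to 20 C
durations = np.full(steps, 1.0)              # equal time intervals
print(pet_reduced_length(durations, temps))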
Acknowledgements

This work has been funded by grant 308192/2019-2 of the National Council for Scientific and Technological Development (Brazil).
References

Arrhenius, S., 1889. Über die Reaktionsgeschwindigkeit bei der Inversion von Rohrzucker durch Säuren. Zeitschrift für Physikalische Chemie 4, 226-248.
Carlson, W.D., 1990. Mechanisms and kinetics of apatite fission-track annealing. American Mineralogist 75, 1120-1139.
Carlson, W.D., Donelick, R.A., Ketcham, R.A., 1999. Variability of apatite fission-track annealing kinetics: I. Experimental results. American Mineralogist 84, 1213-1223.
Cohen, E., Cvitas, T., Fry, J., et al., 2007. IUPAC Quantities, Units and Symbols in Physical Chemistry. IUPAC and RSC Publishing, Cambridge.
Córdoba-Torres, P., Nogueira, R.P., Fairén, V., 2003. Fractional reaction order kinetics in electrochemical systems involving single-reactant, bimolecular desorption reactions. Journal of Electroanalytical Chemistry 560, 25-33.
Crowley, K.D., Cameron, M., Schaefer, R.L., 1991. Experimental studies of annealing of etched fission tracks in fluorapatite. Geochimica et Cosmochimica Acta 55, 1449-1465. doi:10.1016/0016-7037(91)90320-5.
Dodson, M.H., 1973. Closure temperature in cooling geochronological and petrological systems. Contributions to Mineralogy and Petrology 40, 259-274.
Duddy, I., Green, P., Laslett, G., 1988. Thermal annealing of fission tracks in apatite 3. Variable temperature behaviour. Chemical Geology: Isotope Geoscience Section 73, 25-38.
Elzhov, T.V., Mullen, K.M., Spiess, A.N., Bolker, B., 2016. minpack.lm: R Interface to the Levenberg-Marquardt Nonlinear Least-Squares Algorithm Found in MINPACK, Plus Support for Bounds. URL: https://CRAN.R-project.org/package=minpack.lm. R package version 1.2-1.
Gallagher, K., 2012. Transdimensional inverse thermal history modeling for quantitative thermochronology. Journal of Geophysical Research: Solid Earth 117. doi:10.1029/2011jb008825.
Goswami, J., Jha, R., Lal, D., 1984. Quantitative treatment of annealing of charged particle tracks in common minerals. Earth and Planetary Science Letters 71, 120-128.
Green, P., Duddy, I., Laslett, G., 1988. Can fission track annealing in apatite be described by first-order kinetics? Earth and Planetary Science Letters 87, 216-228.
Green, P.F., Duddy, I.R., Gleadow, A.J.W., Tingate, P.R., Laslett, G.M., 1986. Thermal annealing of fission tracks in apatite 1. A qualitative description. Chemical Geology 59, 237-253. doi:10.1016/0009-2541(86)90048-3.
Guedes, S., Lixandrão Filho, A.L., Hadler, J.C., 2022. Generalization of the fission-track Arrhenius annealing equations. Mathematical Geosciences, 1-20.
Guedes, S., Moreira, P.A., Devanathan, R., Weber, W.J., Hadler, J.C., 2013. Improved zircon fission-track annealing model based on reevaluation of annealing data. Physics and Chemistry of Minerals 40, 93-106.
Guedes, S., Oliveira, K., Moreira, P., Iunes, P., et al., 2006. Kinetic model for the annealing of fission tracks in minerals and its application to apatite. Radiation Measurements 41, 392-398.
Issler, D., 1996. Optimizing time step size for apatite fission track annealing model. Computers and Geosciences 22, 835.
Ketcham, R.A., 2005. Forward and inverse modeling of low-temperature thermochronometry data. Reviews in Mineralogy and Geochemistry 58, 275-314. doi:10.2138/rmg.2005.58.11.
Ketcham, R.A., 2019. Fission-Track Annealing: From Geologic Observations to Thermal History Modeling. Springer International Publishing, Cham, pp. 49-75. doi:10.1007/978-3-319-89421-8_3.
Ketcham, R.A., Carter, A., Donelick, R.A., Barbarand, J., Hurford, A.J., 2007. Improved modeling of fission-track annealing in apatite. American Mineralogist 92, 799-810.
Ketcham, R.A., Donelick, R.A., Carlson, W.D., 1999. Variability of apatite fission-track annealing kinetics: III. Extrapolation to geological time scales. American Mineralogist 84, 1235-1255.
Koga, N., Tanaka, H., Šesták, J., 1992. On the fractional conversion α in the kinetic description of solid-state reactions. Journal of Thermal Analysis 38, 2553-2557.
Kooij, D.M., 1893. Über die Zersetzung des gasförmigen Phosphorwasserstoffs. Zeitschrift für Physikalische Chemie 12, 155-161. doi:10.1515/zpch-1893-1214.
Laidler, K.J., 1984. The development of the Arrhenius equation. Journal of Chemical Education 61, 494-498. doi:10.1021/ed061p494.
Laidler, K.J., Keith, J., et al., 1965. Chemical Kinetics. Volume 2. McGraw-Hill, New York.
Laslett, G., Galbraith, R., 1996. Statistical modelling of thermal annealing of fission tracks in apatite. Geochimica et Cosmochimica Acta 60, 5117-5131. doi:10.1016/S0016-7037(96)00307-9.
Laslett, G., Green, P.F., Duddy, I., Gleadow, A., 1987. Thermal annealing of fission tracks in apatite 2. A quantitative analysis. Chemical Geology: Isotope Geoscience Section 65, 1-13.
Rana, M.A., Lixandrao, A.L., Guedes, S., 2021. A new phenomenological model for annealing of fission tracks in apatite: laboratory data fitting and geological benchmarking. Physics and Chemistry of Minerals 48. doi:10.1007/s00269-021-01143-9.
Rufino, M., Guedes, S., 2022. Arrhenius activation energy and transitivity in fission-track annealing equations. Chemical Geology, 120779.
Tamer, M., Ketcham, R., 2020. Is low-temperature fission-track annealing in apatite a thermally controlled process? Geochemistry, Geophysics, Geosystems 21, e2019GC008877.
Vyazovkin, S., 2015. Isoconversional Kinetics of Thermally Stimulated Processes. 1st ed., Springer, Cham. doi:10.1007/978-3-319-14175-6.
Vyazovkin, S., 2016. A time to search: finding the meaning of variable activation energy. Physical Chemistry Chemical Physics 18, 18643-18656.
Wauschkuhn, B., Jonckheere, R., Ratschbacher, L., 2015. The KTB apatite fission-track profiles: Building on a firm foundation? Geochimica et Cosmochimica Acta 167, 27-62. doi:10.1016/j.gca.2015.06.015.
Wolfram Research, Inc., 2021. Mathematica, Version 12.3. Champaign, IL. URL: https://www.wolfram.com/mathematica.
W9E1T4oBgHgl3EQfvgXD/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
XNAyT4oBgHgl3EQf9PpX/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acc2fded35d8e86f471a2e5cdb789396feb1bdfbe65c2f3bd592b7323b37adf7
+ size 150940
XNE0T4oBgHgl3EQf3QLX/content/tmp_files/2301.02724v1.pdf.txt ADDED
@@ -0,0 +1,1914 @@
Facilitating Contrastive Learning of Discourse Relational Senses by Exploiting the Hierarchy of Sense Relations

Wanqiu Long† and Bonnie Webber†
† University of Edinburgh, Edinburgh, UK
Abstract

Implicit discourse relation recognition is a challenging task that involves identifying the sense or senses that hold between two adjacent spans of text, in the absence of an explicit connective between them. In both PDTB-2 (Prasad et al., 2008) and PDTB-3 (Webber et al., 2019), discourse relational senses are organized into a three-level hierarchy ranging from four broad top-level senses to more specific senses below them. Most previous work on implicit discourse relation recognition has used the sense hierarchy simply to indicate what sense labels were available. Here we do more, incorporating the sense hierarchy into the recognition process itself and using it to select the negative examples used in contrastive learning. With no additional effort, the approach achieves state-of-the-art performance on the task. Our code is released at https://github.com/wanqiulong0923/Contrastive_IDRR.
1 Introduction

Discourse relations are an important aspect of textual coherence. In some cases, a speaker or writer signals the sense or senses that hold between clauses and/or sentences in a text using an explicit connective. Recognizing the sense or senses that hold can be more difficult in the absence of an explicit connective.

Automatically identifying the sense or senses that hold between sentences and/or clauses can be useful for downstream NLP tasks such as text summarization (Cohan et al., 2018), machine translation (Meyer et al., 2015) and event relation extraction (Tang et al., 2021). Recent studies on implicit discourse relation recognition have shown great success. In particular, pre-trained neural language models (Peters et al., 2018; Devlin et al., 2019; Liu et al., 2019) have been used and have dramatically improved the performance of models (Shi and Demberg, 2019b; Liu et al., 2020; Kishimoto et al., 2020).

Figure 1: The PDTB-3 Sense Hierarchy (a three-level tree: the Root dominates the Level-1 classes Temporal, Comparison, Contingency and Expansion; below these sit Level-2 types such as Asynchronous, Synchronous, Concession, Contrast, Cause, Purpose, Cause+Belief and Equivalence; and below those, Level-3 subtypes such as Precedence, Succession, Reason, Result, Manner, Reason+Belief and Result+Belief.)

The senses available for labelling discourse relations in the PDTB-2 (and later in the PDTB-3) are arranged in a three-level hierarchy, with the most general senses at the top and more specific senses further down. In the PDTB-3, annotators could only choose senses at terminal nodes in the hierarchy: level-2 senses for symmetric relations such as EXPANSION.EQUIVALENCE and TEMPORAL.SYNCHRONOUS, and level-3 senses for asymmetric relations, with the direction of the relation encoded in its sense label, such as SUBSTITUTION.ARG1-AS-SUBST (where the text labelled ARG1 substitutes for the denied text labelled ARG2) and SUBSTITUTION.ARG2-AS-SUBST (where the text labelled ARG2 substitutes for the denied text labelled ARG1). Early work on recognizing the implicit relations only used the hierarchy to choose a target for recognition (e.g., the senses at level-1 (classes) or those at level-2 (types)). Recently, Wu et al. (2022) have tried to leverage the dependence between the level-1 and level-2 labels (cf. Section 2). The current work goes further, using the whole three-level sense hierarchy to select the negative examples for contrastive learning.
Contrastive learning, which aims to minimize the distance between similar instances (defined as positive examples) and widen the difference with dissimilar instances (negative examples), has been considered effective in constructing meaningful representations (Kim et al., 2021; Zhang et al., 2021; Yan et al., 2021). Previous work on contrastive learning indicates that it is critical to select good negative samples (Alzantot et al., 2018; Wu et al., 2020b; Wang et al., 2021). The insight underlying the current work is that the hierarchy of sense labels can enable the selection of good negative examples for contrastive learning. To see this, consider Examples 1-3 below from the PDTB-3. On the surface, they look somewhat similar, but in Examples 1 and 2, the annotators took the second sentence (Arg2) as providing more detail about the first sentence (Arg1), the sense called EXPANSION.LEVEL-OF-DETAIL.ARG2-AS-DETAIL, while in Example 3, they took the second sentence as expressing a substitute for "American culture" in terms of what is relevant, the sense called EXPANSION.SUBSTITUTION.ARG2-AS-SUBST.

(1) "Valley National" "isn't out of the woods yet". The key will be whether Arizona real estate turns around or at least stabilizes.

(2) The House appears reluctant to join the senators. A key is whether House Republicans are willing to acquiesce to their Senate colleagues' decision to drop many pet provisions.

(3) Japanese culture vs. American culture is irrelevant. The key is how a manager from one culture can motivate employees from another.
In this work, we use a multi-task learning framework, which consists of classification tasks and a contrastive learning task. Unlike most previous work using one benchmark dataset (usually PDTB-2 or PDTB-3), we evaluate our systems on both PDTB-2 and PDTB-3. In addition, Wang et al. (2021) have shown that data augmentation can make representations more robust, thereby enriching the data used in training. We thus follow Ye et al. (2021) and Khosla et al. (2020) in identifying a relevant form of data augmentation for our contrastive learning approach to implicit relation recognition.

The main contributions of our work are as follows:

• We leveraged the sense hierarchy to obtain contrastive learning representations, learning an embedding space in which examples from the same types at level-2 or level-3 stay close to each other while sister types are far apart.

• We explored and compared different methods of defining the negatives based on the sense hierarchies in PDTB-2 and PDTB-3, finding the approach which leads to the greatest improvements.

• Our proposed data augmentation method for generating examples helps to improve the overall performance of our model.

• We demonstrate that implicit relation recognition can benefit from a deeper understanding of the sense labels and their organization.
2 Related Work

Implicit discourse relation recognition. For this task, Dai and Huang (2018) considered paragraph-level context and inter-paragraph dependency. Recently, Shi and Demberg (2019b) showed that using the bidirectional encoder representation from BERT (Devlin et al., 2019) is more accurate for recognizing Temporal.Synchrony, Comparison.Contrast, Expansion.Conjunction and Expansion.Alternative. Liu et al. (2020) showed that different levels of representation learning are all important to implicit relation recognition, and they combined three modules to better integrate context information, capture the interaction between the two arguments, and understand the text in depth. However, only two existing works leveraged the hierarchy in implicit relation recognition. Both Wu et al. (2020a) and Wu et al. (2022) first attempted to assign a Level-1 sense that holds between arguments, and then only considered as possible Level-2 senses those that are daughters of the Level-1 sense.

Contrastive learning. Recently, there has been a growing interest in applying contrastive learning in both the pre-training and fine-tuning objectives of pre-trained language models. Gao et al. (2021) used a contrastive objective to fine-tune pre-trained language models to obtain sentence embeddings, and greatly improved state-of-the-art sentence embeddings on semantic textual similarity tasks. Suresh and Ong (2021) proposed a label-aware contrastive loss for settings with a larger number of classes and/or more confusable classes, which helps models produce more differentiated output distributions. Besides, many works have demonstrated that selecting good negative examples is very important for contrastive learning (Schroff et al., 2015; Joshua et al., 2021; Cao et al., 2022). In our work, we integrate the contrastive learning loss with supervised losses, and we use the structure of the sense hierarchy to guide the selection of negative examples.
3 Learning Loss

3.1 Supervised Learning Loss

The standard approach today for a classification task is to use the standard cross-entropy loss:

\[ L_{sup} = \frac{1}{N} \sum_{i=1}^{N} -\log \frac{e^{W_{y_i}^{T} s_i}}{\sum_j e^{W_j^{T} s_i}} \tag{1} \]

where N denotes the number of training examples, y_i is the ground-truth class of the i-th example, s_i is its representation, and W_j is the weight vector of the j-th class.
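Eq. (1) corresponds to the standard cross-entropy objective as implemented, for example, in PyTorch (a generic illustration with made-up shapes, not the authors' code):

import torch
import torch.nn as nn

logits = torch.randn(4, 14)           # W^T s_i for a batch of 4 examples, 14 senses
targets = torch.tensor([3, 0, 7, 1])  # ground-truth classes y_i
print(nn.CrossEntropyLoss()(logits, targets))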
3.2 Contrastive Learning Loss

In contrastive learning, each example can be treated as an anchor to get its positive and negative examples. Contrastive learning pulls the anchor and its positive example together in the embedding space, while the anchor and negative samples are pushed apart. The contrastive learning loss was used by Chen et al. (2020) and Suresh and Ong (2021) before. A set of N randomly sampled sample-label pairs is defined as {x_k, y_k}, k = 1, ..., N, where x and y represent samples and labels, respectively. Let i be the index of the anchor sample and j the index of a positive sample, where i, j ∈ {1, ..., N} and i ≠ j. The contrastive loss is defined as:

\[ L_{scl} = -\sum_{i=1}^{N} \log \frac{e^{sim(h_j, h_i)/\tau}}{\sum_{k \neq i} e^{sim(h_k, h_i)/\tau}} \tag{2} \]

Here, h denotes the feature vector in the embedding space, and τ is the temperature parameter. Intuitively, the numerator computes the similarity between the anchor i and its positive sample j, while the denominator computes the similarities between i and all other samples, of which a total of N - 1 are computed.
Supervised contrastive learning (Gunel et al., 2021) extends Eq. (2) to the supervised scenario. In particular, given the presence of labels, the positive examples are all examples with the same label. The loss is defined as:

\[ L_{scl} = \sum_{i=1}^{N} -\frac{1}{N_{y_i} - 1} \sum_{j=1}^{N} \mathbb{1}_{i \neq j}\, \mathbb{1}_{y_i = y_j} \log \frac{e^{sim(h_j, h_i)/\tau}}{\sum_{k=1}^{N} \mathbb{1}_{i \neq k}\, e^{sim(h_k, h_i)/\tau}} \tag{3} \]

N_{y_i} indicates the number of examples in a batch that have the same label as i, τ is the temperature parameter, and h denotes the feature vector from the l2-normalized final encoder hidden layer before the softmax projection.
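For reference, Eq. (3) can be written compactly in PyTorch. This is a minimal sketch of the loss alone, not the authors' released implementation; the tensor names and the temperature value are assumptions:

import torch
import torch.nn.functional as F

def supervised_contrastive_loss(h, labels, tau=0.1):
    # h: [N, d] feature vectors; labels: [N] integer sense labels
    h = F.normalize(h, dim=1)                  # l2-normalize, as in Eq. (3)
    sim = (h @ h.T) / tau
    n = h.size(0)
    self_mask = torch.eye(n, dtype=torch.bool, device=h.device)
    # Denominator of Eq. (3): log-sum-exp over all k != i
    denom = torch.logsumexp(sim.masked_fill(self_mask, float('-inf')), dim=1, keepdim=True)
    log_prob = sim - denom
    # Positives: same label, excluding the anchor itself
    pos_mask = (labels.unsqueeze(0) == labels.unsqueeze(1)) & ~self_mask
    n_pos = pos_mask.sum(dim=1).clamp(min=1)   # N_{y_i} - 1, guarded against 0
    return (-(log_prob * pos_mask.float()).sum(dim=1) / n_pos).mean()

h = torch.randn(8, 16)
labels = torch.tensor([0, 1, 2, 0, 1, 2, 0, 1])
print(supervised_contrastive_loss(h, labels))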
4 Our Approach

Figure 2 shows the overall architecture of our method. As Figure 2 illustrates, we first use a simple multi-task model based on RoBERTa-base (Liu et al., 2019), and then we develop a contrastive learning algorithm where the sense hierarchy is used to select positive and negative examples. Detailed descriptions of our framework and our data augmentation method are given below.

4.1 Sentence Encoder

Every annotated discourse relation consists of two sentences or clauses (its arguments) and one or more relational senses that the arguments bear to each other. We concatenate the two arguments of each example and input them into RoBERTa. Following standard practice, we add two special tokens to mark the beginning ([CLS]) and the end ([SEP]) of the sentences. We use the representation of [CLS] in the last layer as the representation of the whole sentences.
318
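+ A small sketch of this encoding step (ours; the example sentences are invented) using the
+ HuggingFace transformers library:
+ ```python
+ # Encode an argument pair and take the first-token representation.
+ import torch
+ from transformers import RobertaTokenizer, RobertaModel
+ 
+ tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
+ model = RobertaModel.from_pretrained("roberta-base")
+ 
+ arg1 = "The company reported a loss."
+ arg2 = "Its shares fell sharply."
+ # Passing the arguments as a pair yields <s> arg1 </s></s> arg2 </s>,
+ # RoBERTa's equivalent of [CLS] arg1 [SEP] arg2 [SEP].
+ inputs = tokenizer(arg1, arg2, return_tensors="pt")
+ with torch.no_grad():
+     outputs = model(**inputs)
+ cls_repr = outputs.last_hidden_state[:, 0]  # representation of the [CLS]/<s> token
+ ```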
+ 4.2 Data Augmentation
+ To increase the number of training examples, we take advantage of meta-data recorded with
+ each Implicit Discourse Relation in the PDTB (cf. Webber et al. (2019), Section 8). For each
+ sense taken to hold between the arguments of a relation, annotators have recorded in the
+ meta-data an explicit connective that could have signalled that sense. In the past, this
+ meta-data was used in implicit relation recognition by both Patterson and Kehler (2013) and
+ Rutherford and Xue (2015). We have used it in a different way, shown in Figure 3, to create
+ an additional training example for each connective that appears in the meta-data. In the
+ added training example, this added connective becomes part of the second argument of the
+ relation (i.e., appearing after the [SEP] token).
+ Since there is at least one explicit connective recorded in the meta-data for each implicit
+ discourse relation, and at most two [1], for a training batch of N tokens there will be at
+ least another N tokens introduced by this data augmentation method, increasing the training
+ batch to at least 2N tokens.
+ [1] This is because the PDTB only allows for one or two senses per relation.
+ [Figure 2 here: a Transformer encoder over the input [CLS] w1_1 ... [SEP] w2_1 ... [SEP],
+ whose pooled output feeds a Level-1 classifier and a Level-2 classifier, with the sense
+ hierarchy guiding the choice of anchor, positive, and negative examples; only fragments of
+ the diagram survive text extraction.]
+ Figure 2: The overall architecture of our model. When given an anchor, we search the positive
+ and negative examples in a training batch based on the sense hierarchy of the PDTB. We narrow
+ the distances among examples from the same types at level-2 or level-3 and enlarge the
+ distances among examples from different types at level-2 and level-3.
+ [Figure 3 here: an original implicit example e_imp and its augmented copy with the connective
+ inserted at the start of Arg2; the example text itself is lost to extraction.]
+ Figure 3: An example with inserted connective: the connective word is "In contrast".
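+ The insertion itself is simple; a hedged sketch (the field names and example text are our own
+ assumptions, not from the PDTB distribution):
+ ```python
+ # For each implicit relation, prepend each recorded connective to Arg2
+ # to create an extra training example with the same label.
+ def augment(example):
+     """example: dict with 'arg1', 'arg2', 'label', and 'connectives'
+     (the 1 or 2 explicit connectives recorded in the PDTB meta-data)."""
+     extra = []
+     for conn in example["connectives"]:
+         extra.append({
+             "arg1": example["arg1"],
+             "arg2": f"{conn} {example['arg2']}",  # connective becomes part of Arg2
+             "label": example["label"],
+         })
+     return extra
+ 
+ ex = {"arg1": "The company reported a loss.",
+       "arg2": "its rivals posted record profits.",
+       "label": "Comparison.Contrast",
+       "connectives": ["In contrast"]}
+ print(augment(ex))
+ ```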
+ 4.3 Positive Pair and Negative Pair Generation
+ We use the structure of the sense hierarchy to identify the positive and negative examples
+ needed for contrastive learning. The only senses used in annotating discourse relations are
+ ones at terminal nodes of the sense hierarchy. This is Level 2 for symmetric senses and
+ Level 3 for asymmetric senses (i.e., where the inverse of the sense that holds between Arg1
+ and Arg2 is what holds between Arg2 and Arg1). For example, CONTRAST and SIMILARITY are both
+ symmetric senses, while MANNER and CONDITION are asymmetric, given that there is a difference
+ between Arg2 being the manner of doing Arg1 and Arg1 being the manner of doing Arg2.
+ In our work, when the lowest level of a sense is level-3, we directly use the level-3 labels
+ instead of their parent at level-2. For example, under the level-2 label
+ Temporal.Asynchronous there are two level-3 labels, precedence and succession. In this case,
+ we replace the level-2 label Temporal.Asynchronous with the two level-3 labels precedence and
+ succession.
+ Although the supervised contrastive learning of Eq. 3 can be valid for different classes of
+ positive example pairs, its negative examples come from any examples inside a batch except
+ the anchor itself. We define l1, l2, l3 as the first, second, and third level in the
+ hierarchical structure respectively, and l \in l_i refers to the labels from level i.
+ Instance e ~ Same sub-level: e_pos
+ Given the representation of a sentence e^i and its first-, second- and third-level labels
+ l^i_1, l^i_2, l^i_3, we search each training batch for the set of examples with the same
+ second-level label, or the same third-level label if the lowest level is level-3, as e_pos:
+ e^i_{pos} = {e in batch : l^e_2 = l^i_2 or l^e_3 = l^i_3}   (4)
+ E.g., if the label of the anchor is Temporal.Asynchronous.Precedence, its positive examples
+ would be the examples with the same label.
+ Instance e ~ Batch instance: e_neg
+ Here, we would like to help the model discriminate the sister types at level-2 and at level-3
+ (if the lowest level is level-3). We search each training batch for the set of examples with
+ different level-2 labels or level-3 labels as e_neg. E.g., if the label of the anchor is
+ Temporal.Asynchronous.Precedence, its negative examples would be its sister types at level-2
+ and level-3, namely Temporal.Asynchronous.Succession and Temporal.Synchronous.
+ e^i_{neg} = {e in batch : l^e_1 = l^i_1 and (l^e_2 \neq l^i_2 and l^e_3 \neq l^i_3)}   (5)
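+ A hedged sketch of this selection (ours; the label fields, and the use of the terminal label
+ as the positive criterion, are our reading of Equations 4 and 5):
+ ```python
+ # Split a batch into positives and negatives for one anchor using the sense hierarchy.
+ def split_batch(anchor, batch):
+     pos, neg = [], []
+     for e in batch:
+         if e is anchor:
+             continue
+         if e["l2"] == anchor["l2"] and e["l3"] == anchor["l3"]:
+             pos.append(e)  # Eq. 4: same terminal (level-2 or level-3) sense
+         elif e["l1"] == anchor["l1"]:
+             neg.append(e)  # Eq. 5: sister senses under the same level-1 type
+     return pos, neg
+ 
+ batch = [
+     {"l1": "Temporal", "l2": "Asynchronous", "l3": "Precedence"},
+     {"l1": "Temporal", "l2": "Asynchronous", "l3": "Succession"},
+     {"l1": "Temporal", "l2": "Synchronous",  "l3": None},
+     {"l1": "Contingency", "l2": "Cause", "l3": "Reason"},
+ ]
+ pos, neg = split_batch(batch[0], batch)  # pos: [], neg: the two Temporal sisters
+ ```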
+ 4.4
484
+ Loss Algorithms
485
+ As described above, given the query ei with its
486
+ positive pairs and negative pairs and based on the
487
+ general contrastive learning loss (see Equation 2),
488
+ the contrastive learning loss for our task and ap-
489
+ proach is:
490
+ Lscl =
491
+ N
492
+
493
+ i=1
494
+
495
+ 1
496
+ |eipos| − 1
497
+ 2N
498
+
499
+ j=1
500
+ 1i̸=j1j∈eipos
501
+ log
502
+ wjesim(hj,hi)τ
503
+ �2N
504
+ k=1 1i̸=k1k∈eineg+eiposwkesim(hk,hi)/τ
505
+ (6)
506
+ where wj and wj are weight factors for differ-
507
+ ent positive pairs and negative pairs respectively,
508
+ sim(hi, hj) is cosine similarity and τ is a tempera-
509
+ ture hyperparameter.
510
+ Our overall training goal is:
511
+ L = Ll1
512
+ sup + Ll2
513
+ sup + βLscl
514
+ (7)
515
+ As our classifications are done in the first level
516
+ and second level for the same inputs, we used a
517
+ standard cross-entropy loss to get supervised loss
518
+ LL1
519
+ sup and LL2
520
+ sup. And β is the weighting factor for
521
+ the contrastive loss.
522
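+ The sketch below (our rendering; the mask construction and weight values are assumptions
+ consistent with Section 5.3) shows one way Equations 6 and 7 can be combined in code.
+ ```python
+ # A weighted contrastive loss over a 2N-example batch, plus the overall objective.
+ import torch
+ 
+ def hier_contrastive_loss(h, pos_mask, neg_mask, w_pos=1.6, w_neg=1.0, tau=0.1):
+     """h: (2N, d) normalized features; pos_mask/neg_mask: (2N, 2N) 0/1 matrices
+     built from the sense hierarchy (Eqs. 4 and 5), zero on the diagonal."""
+     sim = h @ h.T / tau
+     w = w_pos * pos_mask + w_neg * neg_mask        # weight factors w_k (0 outside the masks)
+     num = w_pos * torch.exp(sim)                   # w_j e^{sim/tau} for the positives
+     den = (torch.exp(sim) * w).sum(dim=1, keepdim=True).clamp(min=1e-12)
+     log_prob = torch.log(num / den)
+     n_pos = pos_mask.sum(dim=1).clamp(min=1)
+     return -(pos_mask * log_prob).sum(dim=1).div(n_pos).mean()
+ 
+ # Overall objective (Eq. 7): loss = ce_level1 + ce_level2 + beta * contrastive term.
+ ```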
+ 5 Experiment Setting
+ 5.1 Datasets
+ Besides providing a sense hierarchy, the Penn Discourse TreeBank (PDTB) also frequently
+ serves as a dataset for evaluating the recognition of discourse relations. The earlier
+ corpus, PDTB-2 (Prasad et al., 2008), included 40,600 annotated relations, while the later
+ version, PDTB-3 (Webber et al., 2019), includes an additional 13K annotations, primarily
+ intra-sentential, as well as correcting some inconsistencies in the PDTB-2. The sense
+ hierarchy used in the PDTB-3 differs somewhat from that used in the PDTB-2, with additions
+ motivated by the needs of annotating intra-sentential relations and changes motivated by
+ difficulties that annotators had in consistently using some of the senses in the PDTB-2
+ hierarchy.
+ Because of the differences in these two hierarchies, we use the PDTB-2 hierarchy for PDTB-2
+ data and the PDTB-3 hierarchy for PDTB-3 data respectively. We follow earlier work (Ji and
+ Eisenstein, 2015; Bai and Zhao, 2018; Liu et al., 2020; Xiang et al., 2022) in using Sections
+ 2-20 of the corpus for training, Sections 0-1 for validation, and Sections 21-22 for testing.
+ With regard to those instances with multiple annotated labels, we also follow previous work
+ (Qin et al., 2016): they are treated as separate examples during training, and at test time a
+ prediction matching one of the gold types is taken as the correct answer. Implicit relation
+ recognition is usually treated as a classification task. While 4-way (Level-1) classification
+ was carried out on both PDTB-2 and PDTB-3, more detailed 11-way (Level-2) classification was
+ done only on the PDTB-2, and 14-way (Level-2) classification only on the PDTB-3.
+ 5.2 Baselines
+ To exhibit the effectiveness of our proposed method, we compare it with strong baselines. As
+ previous work usually used one dataset (PDTB-2 or PDTB-3) for evaluation, we use different
+ baselines for PDTB-2 and PDTB-3. Since PDTB-3 was not released until 2019, the baselines for
+ PDTB-3 from 2016 and 2017 are from Xiang et al. (2022), who reproduced on PDTB-3 those models
+ originally used on PDTB-2.
+ Baselines for PDTB-2:
+ • (Dai and Huang, 2019): a neural model leveraging external event knowledge and coreference
+ relations.
+ • (Shi and Demberg, 2019a): a neural model that leverages the inserted connectives to learn
+ better argument representations.
+ • (Nguyen et al., 2019): a neural model which predicts the labels and connectives
+ simultaneously.
+ • (Guo et al., 2020): a knowledge-enhanced neural network framework.
+ • (Kishimoto et al., 2020): a model applying three additional training tasks.
+ • (Liu et al., 2020): a RoBERTa-based model which consists of three different modules.
+ • (Jiang et al., 2021): a method that recognizes the relation label and generates the target
+ sentence simultaneously.
+ • (Dou et al., 2021): a method using a conditional VAE to estimate the risk of erroneous
+ sampling.
+ • (Wu et al., 2022): a label dependence-aware sequence generation model.
+ Baselines for PDTB-3:
+ • (Liu and Li, 2016): a model that combines two arguments' representations for stacked
+ interactive attention.
+ • (Chen et al., 2016a): a mixed generative-discriminative framework.
+ • (Lan et al., 2017): a multi-task attention neural network.
+ • (Ruan et al., 2020): a propagative attention learning model.
+ • (Xiang et al., 2022): a model that uses a Dual Attention Network (DAN).
+ [Figure 4 here: four panels plotting Acc and F1 against β on PDTB-2 and PDTB-3 for (a)
+ top-level label classification and (b) second-level label classification; only the axis
+ labels survive extraction.]
+ Figure 4: Effects of β on the validation set.
+ 5.3 Parameters Setting
+ In our experiments, we use the pre-trained RoBERTa-base (Liu et al., 2019) as our encoder. We
+ adopt Adam (Kingma and Ba, 2015) with a learning rate of 3e-5 and a batch size of 256 to
+ update the model. The maximum number of training epochs is set to 25 and the wait patience
+ for early stopping is set to 10 for all models. We clip the gradient L2-norm with a threshold
+ of 2.0. For contrastive learning, the weight of positive examples is set to 1.6 and the
+ weight of negative examples is set to 1. All experiments are performed with 1× 80GB NVIDIA
+ A100 GPU.
+ 5.4 Evaluation Metrics
+ We use Accuracy and Macro-F1 score as evaluation metrics, because the PDTB datasets are
+ imbalanced and the Macro-F1 score has been said to be a more appropriate assessment measure
+ for imbalanced datasets (Akosa, 2017; Bekkar et al., 2013).
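+ A two-line illustration of why the two metrics diverge on imbalanced data (the example values
+ are ours):
+ ```python
+ # Accuracy rewards majority-class guessing; macro-F1 exposes it.
+ from sklearn.metrics import accuracy_score, f1_score
+ 
+ gold = [0, 0, 0, 0, 1, 2]                    # imbalanced gold labels
+ pred = [0, 0, 0, 0, 0, 0]                    # always predict the majority class
+ print(accuracy_score(gold, pred))            # 0.67: looks respectable
+ print(f1_score(gold, pred, average="macro", zero_division=0))  # 0.27: it is not
+ ```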
+ 5.5 Effects of the Coefficient β
+ As shown in Equation 7, the coefficient β is an important hyperparameter that controls the
+ relative importance of the supervised loss and the contrastive loss. Thus, we vary β from 0
+ to 2.4 with an increment of 0.2 at each step, and inspect the performance of our model under
+ different β on the validation set.
+ From Figure 4 we can see that, compared with the model without contrastive learning (β = 0),
+ the performance of our model at any level is always improved via contrastive learning. For
+ PDTB-2, when β exceeds 1.0, the performance of our model tends to be stable and finally
+ declines. Thus, we directly set β = 1.0 for all PDTB-2 related experiments thereafter. For
+ PDTB-3, the Acc and F1 on the validation set reach their highest point at β = 2.0, so we
+ choose β = 2.0 for all related experiments.
+ We have considered three possible explanations for this difference in the optimal weighting
+ coefficient. First, compared with PDTB-2, the PDTB-3 contains about 6000 more implicit tokens
+ annotated for discourse relations. Secondly, although the sense hierarchies of both the
+ PDTB-2 and the PDTB-3 have three levels and share the same senses at level-1, there are many
+ changes at level-2 and level-3, due to difficulties found in annotating certain senses.
+ Moreover, the intra-sentential implicit relations might be another reason: in PDTB-3, many
+ more discourse relations are annotated within sentences, and Liang et al. (2020) report quite
+ striking differences in the distribution of sense relations inter-sententially vs.
+ intra-sententially between PDTB-2 and PDTB-3. These major differences between the PDTB-3 and
+ the PDTB-2 might cause the fluctuation of the coefficient value.
+ 6 Results and Analysis
+ The results on PDTB-2 and PDTB-3 for Level-1 and Level-2 are presented in Table 1 and Table 2
+ respectively, where the best results are highlighted in bold. Classification performance on
+ PDTB-2 in terms of Macro-F1 for the four general sense types at Level-1 and the 11 sense
+ types at Level-2 is shown in Table 3 and Table 4.
+ These results demonstrate better performance than previous systems for both Level-1 and
+ Level-2 classification on both PDTB-2 and PDTB-3. In particular, the results clearly
+ demonstrate benefits to be gained from contrastive learning. But there is more to be said: in
+ Section 6.1, we discuss different ways of defining negative examples with respect to the
+ sense hierarchy, and in Section 6.2, we discuss the relative value of the particular form of
+ data augmentation we have used (cf. Section 4.2) as compared with our method of contrastive
+ learning.
+ 6.1 Comparisons with Other Negatives Selecting Methods
+ There is not only one way to select negative examples for contrastive learning based on PDTB
+ hierarchical structures.
+ Model                    | Top-Level Acc | Top-Level Macro-F1 | Second-Level Acc | Second-Level Macro-F1
+ Dai and Huang (2019)     | 59.66 | 52.89 | 48.23 | 33.41
+ Shi and Demberg (2019a)  | 61.42 | 46.40 | 47.83 | -
+ Nguyen et al. (2019)     | -     | 53.00 | 49.95 | -
+ Guo et al. (2020)        | 57.25 | 47.90 | -     | -
+ Kishimoto et al. (2020)  | 65.26 | 58.48 | 52.34 | -
+ Liu et al. (2020)        | 69.06 | 63.39 | 58.13 | -
+ Jiang et al. (2021)      | -     | 57.18 | -     | 37.76
+ Dou et al. (2021)        | 70.17 | 65.06 | -     | -
+ Wu et al. (2022)         | 71.18 | 63.73 | 60.33 | 40.49
+ Ours                     | 72.18 | 69.60 | 61.69 | 49.66
+ Table 1: Experimental results on PDTB-2.
+ Model                        | Top-Level Acc | Top-Level Macro-F1 | Second-Level Acc | Second-Level Macro-F1
+ Liu and Li (2016)            | 57.67 | 46.13 | -     | -
+ Chen et al. (2016b)          | 57.33 | 45.11 | -     | -
+ Lan et al. (2017)            | 57.06 | 47.29 | -     | -
+ Ruan et al. (2020)           | 58.01 | 49.45 | -     | -
+ Xiang et al. (2022) (BiLSTM) | 60.45 | 53.14 | -     | -
+ Xiang et al. (2022) (BERT)   | 64.04 | 56.63 | -     | -
+ Ours                         | 75.31 | 70.05 | 64.68 | 57.62
+ Table 2: Experimental results on PDTB-3.
+ Model                | Comp. | Cont. | Exp.  | Temp.
+ Nguyen et al. (2019) | 48.44 | 56.84 | 73.66 | 38.60
+ Guo et al. (2020)    | 43.92 | 57.67 | 73.45 | 36.33
+ Liu et al. (2020)    | 59.44 | 60.98 | 77.66 | 50.26
+ Jiang et al. (2021)  | 55.40 | 57.04 | 74.76 | 41.54
+ Dou et al. (2021)    | 55.72 | 63.39 | 80.34 | 44.01
+ Ours                 | 65.84 | 63.55 | 79.17 | 69.86
+ Table 3: The results for relation types at level-1 on PDTB-2 in terms of F1 (%) (top-level
+ multi-class classification).
+ In addition to the method we adopt, we have explored another four different methods of
+ defining positive and negative examples by using the sense hierarchies, shown in Figure 5.
+ One can choose the level against which to select negative examples: method 2 below uses
+ examples with different labels at level-2, while methods 1, 3 and 4 use examples with
+ different labels at level-1. With regard to the use of weight in methods 3 and 4, we aim to
+ give more weight to more similar (potentially) positive examples based on the hierarchy.
+ Specifically, when all of the examples from the same level-1 type count as positive examples,
+ we give more weight to the examples from the same level-2/level-3 type than to their sister
+ types at level-2/level-3. Besides, method 4 leverages level-3 labels, while methods 1 to 3
+ only consider level-1 and level-2 labels. In our experiments with the other
+ negative-selection methods, we use the same hyperparameters as in the experimental setup of
+ our method. For methods 3 and 4, the weight of positive examples is set to 1.6 and 1.3
+ respectively, and the weight of negative examples is still 1.
+ Second-level Label   | Liu et al. (2020) | Wu et al. (2022) | Ours
+ Temp.Asynchronous    | 56.18 | 56.47 | 59.79
+ Temp.Synchrony       | 0.00  | 0.00  | 78.26
+ Cont.Cause           | 59.60 | 64.36 | 65.58
+ Cont.Pragmatic cause | 0.00  | 0.00  | 0.00
+ Comp.Contrast        | 59.75 | 63.52 | 62.63
+ Comp.Concession      | 0.00  | 0.00  | 0.00
+ Exp.Conjunction      | 60.17 | 57.91 | 58.35
+ Exp.Instantiation    | 67.96 | 72.60 | 73.04
+ Exp.Restatement      | 53.83 | 58.06 | 60.00
+ Exp.Alternative      | 60.00 | 63.46 | 53.85
+ Exp.List             | 0.00  | 8.98  | 34.78
+ Table 4: The results for relation types at level-2 on PDTB-2 in terms of F1 (%) (second-level
+ multi-class classification).
+ It can be seen from Table 5 and Table 6 that our method is better than the above methods on
+ both datasets for both level-1 and level-2 classification tasks. Compared with method 2, we
+ utilize level-3 labels, which indicates that level-3 label information is helpful for the
+ approach. The greatest difference between our method and the other three methods is that our
+ negative examples are only the sister types at level-2 or level-3, not including examples
+ from a different level-1 type. On the contrary, the negative examples in those three methods
+ are examples from other level-1 types. We suppose that this makes too strong an assumption,
+ namely that examples from different level-1 types are very dissimilar. In the PDTB datasets,
+ some examples have been annotated with multiple labels, and we found that among all examples
+ with multiple annotated labels, 99.26% have their multiple labels under different level-1
+ types. Moreover, some level-1 types of relation might overlap even when the annotators
+ annotate just one label. For example, some examples annotated as Temporal.Asynchronous might
+ carry the sense of Contingency.Cause as well; Moens and Steedman (1988) have pointed out that
+ when-clauses do not simply predicate a temporal relation, but a causal one as well, which can
+ be called contingency. This shows up in the PDTB in terms of the variation in how particular
+ tokens of when-clauses have been annotated. But it also means that in choosing negative
+ examples, relations labelled TEMPORAL.SYNCHRONOUS or TEMPORAL.ASYNCHRONOUS may closely
+ resemble those labelled CONTINGENCY.CAUSE and therefore not be effective as negative
+ examples. Consider the following example:
+ (4) when [they built the 39th Street bridge]1, [they solved most of their traffic problems]2.
+ Model    | PDTB-2 Top Acc | PDTB-2 Top F1 | PDTB-2 2nd Acc | PDTB-2 2nd F1 | PDTB-3 Top Acc | PDTB-3 Top F1 | PDTB-3 2nd Acc | PDTB-3 2nd F1
+ Method 1 | 68.91 | 65.04 | 58.61 | 46.27 | 73.25 | 68.00 | 61.17 | 55.58
+ Method 2 | 69.39 | 63.95 | 58.33 | 44.80 | 73.53 | 68.36 | 61.93 | 54.85
+ Method 3 | 69.39 | 66.53 | 58.61 | 39.20 | 72.49 | 67.49 | 60.77 | 54.33
+ Method 4 | 69.10 | 65.30 | 57.07 | 47.46 | 71.26 | 66.47 | 59.53 | 47.24
+ Ours     | 72.18 | 69.60 | 61.69 | 49.66 | 75.31 | 70.05 | 64.48 | 57.62
+ Table 5: Comparisons with other negative-example selection methods (Acc and Macro-F1, %).
+ (a) Method 1. Positive: examples with the same label at level-1. Negative: examples with
+ different labels at level-1.
+ (b) Method 2. Positive: examples with the same label at level-2. Negative: examples with
+ different labels at level-2.
+ (c) Method 3. Positive: examples with the same label at level-1, with more weight given to
+ the examples with the same label at level-2. Negative: examples with different labels at
+ level-1.
+ (d) Method 4. Positive: examples with the same label at level-1, with more weight given to
+ the examples with the same label at level-2 or level-3. Negative: examples with different
+ labels at level-1.
+ Figure 5: Four other methods of selecting negative examples. The orange ball represents the
+ anchor, green balls represent negative examples, and blue balls represent positive examples.
+ A darker blue ball means more weight is given to a more similar (potentially) positive
+ example.
+ If the connective "when" is replaced with "because", the sentence still does not sound
+ strange. Therefore, regarding all examples from different level-1 types as negative examples
+ might have some negative impact on learning the representations.
+ 6.2 Ablation Study
+ We wanted to know how useful our data augmentation method and our contrastive learning
+ method are, so we have undertaken ablation studies for this.
+ Model    | Comp. | Cont. | Exp.  | Temp.
+ Method 1 | 63.26 | 60.42 | 76.78 | 59.74
+ Method 2 | 60.78 | 60.82 | 77.89 | 56.30
+ Method 3 | 59.85 | 65.18 | 76.43 | 64.67
+ Method 4 | 57.25 | 61.73 | 77.30 | 64.90
+ Ours     | 65.84 | 63.55 | 79.17 | 69.86
+ Table 6: The results of relation types at level-1 on PDTB-2 in terms of F1 (%) (top-level
+ multi-class classification).
+ Effects of the contrastive learning algorithm. From Table 7, it can be seen that the
+ multi-task learning method, where level-1 and level-2 labels are predicted simultaneously
+ from the same [CLS] representation, performs better than separately predicting level-1 and
+ level-2 labels, which verifies the dependency between the different levels. Compared with the
+ multi-task learning method, our model with a contrastive loss has better performance on
+ PDTB-2 and PDTB-3, which means that our contrastive learning method is indeed helpful.
+ Effects of data augmentation. Table 8 compares the results with and without data augmentation
+ for both PDTB-2 and PDTB-3. From the comparisons, it is clear that the data augmentation
+ method helps to generate useful examples. Khosla et al. (2020) showed that having a large
+ number of hard positives/negatives in a batch leads to better performance. Since we have many
+ classes at the second level (11 types for PDTB-2 and 14 types for PDTB-3), in a batch of size
+ 256 it is difficult to guarantee that there are enough positive examples for each class to
+ take full advantage of contrastive learning. Therefore, without data augmentation, the
+ performance of our method degrades considerably.
+ Dataset | Model       | Top-Level Acc | Top-Level Macro-F1 | Second-Level Acc | Second-Level Macro-F1
+ PDTB-2  | RoBERTa     | 68.14 | 64.87 | 58.33 | 48.37
+ PDTB-2  | RoBERTa-MTL | 69.87 | 65.39 | 58.22 | 45.21
+ PDTB-2  | Ours        | 72.18 | 69.60 | 61.69 | 49.66
+ PDTB-3  | RoBERTa     | 72.02 | 67.44 | 60.56 | 57.12
+ PDTB-3  | RoBERTa-MTL | 72.63 | 68.23 | 60.56 | 57.16
+ PDTB-3  | Ours        | 75.31 | 70.05 | 64.68 | 57.62
+ Table 7: Ablation study on PDTB-2 and PDTB-3.
+ Dataset | Model         | Top-Level Acc | Top-Level Macro-F1 | Second-Level Acc | Second-Level Macro-F1
+ PDTB-2  | Ours          | 72.18 | 69.60 | 61.69 | 49.66
+ PDTB-2  | -augmentation | 71.70 | 67.85 | 59.19 | 45.54
+ PDTB-3  | Ours          | 75.31 | 70.05 | 64.68 | 57.62
+ PDTB-3  | -augmentation | 73.32 | 69.02 | 63.24 | 51.80
+ Table 8: Effects of data augmentation.
+ 7 Limitations and Future Work
+ With regard to PDTB-2 and PDTB-3 annotation, there are two cases: (1) annotators can assign
+ multiple labels to an example when they believe more than one relation holds simultaneously;
+ (2) annotators can be told (in the annotation manual) to give precedence to one label if they
+ take more than one to hold. For example, they are told in the manual (Webber et al., 2019)
+ that examples that satisfy the conditions for both Contrast and Concession should be labelled
+ as Concession. We over-simplified the presence of multiple labels by following Qin et al.
+ (2017) in treating each label as
+ a separate example, and we did not consider the second case. Thus, our approach might be
+ inadequate for dealing with the actual distribution of the data and can be extended or
+ modified. It is worth exploring how to extend our approach to allow for examples with
+ multiple sense labels and for cases where one label takes precedence over another. We believe
+ that this will be an important extension of the work.
+ Another limitation is that we only use English datasets. There are PDTB-style datasets in
+ other languages, including a Chinese TED discourse bank corpus (Long et al., 2020), a Turkish
+ discourse treebank corpus (Zeyrek and Kurfalı, 2017) and an Italian Discourse Treebank
+ (Pareti and Prodanof, 2010). Moreover, Zeyrek et al. (2019) proposed the TED Multilingual
+ Discourse Bank (TED-MDB) corpus, which covers 6 languages. These datasets would allow us to
+ assess the approach in languages other than English. Besides, there are datasets similar to
+ PDTB-style ones, like the Prague Dependency Treebank (Mírovský et al., 2014). The different
+ datasets use essentially similar sense hierarchies, but two things need to be investigated:
+ (i) whether there are comparable differences between tokens that realise "sister" relations,
+ and (ii) whether tokens often have multiple sense labels, which would change what could be
+ used as negative examples when leveraging our approach on them.
+ In the future, we can also assess whether contrastive learning could help in separating out
+ EntRel relations and AltLex relations from implicit relations, or whether other methods would
+ perform better.
+ 8 Conclusions
+ In this paper, we leverage the sense hierarchy to select the negative examples needed for
+ contrastive learning for the task of implicit discourse relation recognition. Our method
+ achieves better overall performance than previous systems and, compared with previous work,
+ it is better at learning minority labels. Moreover, we compared different methods of
+ selecting negative examples based on the hierarchical structures, which shows that some
+ negative impacts might be produced when negative examples include those from other level-1
+ types. We also conducted ablation studies to investigate the effects of our data augmentation
+ method and our contrastive learning method. Finally, the limitations and future work are
+ discussed.
+ Acknowledgments
+ This work was supported in part by the UKRI Centre for Doctoral Training in Natural Language
+ Processing, funded by the UKRI (grant EP/S022481/1) and the University of Edinburgh. The
+ authors also gratefully acknowledge the University of Edinburgh Huawei Laboratory for their
+ support.
+ References
+ Josephine Sarpong Akosa. 2017. Predictive accuracy: A misleading performance measure for highly imbalanced data.
+ Moustafa Farid Alzantot, Yash Sharma, Ahmed Elgohary, Bo-Jhang Ho, Mani B. Srivastava, and Kai-Wei Chang. 2018. Generating natural language adversarial examples. In EMNLP.
+ Hongxiao Bai and Hai Zhao. 2018. Deep enhanced representation for implicit discourse relation recognition. In Proceedings of COLING 2018, pages 571-583, Santa Fe, New Mexico, USA.
+ Mohamed Bekkar, Hassiba Kheliouane Djemaa, and Taklit Akrouf Alitouche. 2013. Evaluation measures for models assessment over imbalanced data sets. Journal of Information Engineering and Applications, 3:27-38.
+ Rui Cao, Yihao Wang, Yu Guo Liang, Ling Gao, Jie Zheng, Jie Ren, and Zheng Wang. 2022. Exploring the impact of negative samples of contrastive learning: A case study of sentence embedding. In Findings.
+ Jifan Chen, Qi Zhang, Pengfei Liu, and Xuanjing Huang. 2016a. Discourse relations detection via a mixed generative-discriminative framework. In AAAI.
+ Jifan Chen, Qi Zhang, Pengfei Liu, Xipeng Qiu, and Xuanjing Huang. 2016b. Implicit discourse relation detection via a deep architecture with gated relevance network. In Proceedings of ACL 2016 (Long Papers), pages 1726-1735, Berlin, Germany.
+ Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. 2020. A simple framework for contrastive learning of visual representations. In Proceedings of ICML 2020, PMLR 119, pages 1597-1607.
+ Arman Cohan, Franck Dernoncourt, Doo Soon Kim, Trung Bui, Seokhwan Kim, Walter Chang, and Nazli Goharian. 2018. A discourse-aware attention model for abstractive summarization of long documents. In Proceedings of NAACL-HLT 2018 (Short Papers), pages 615-621, New Orleans, Louisiana.
+ Zeyu Dai and Ruihong Huang. 2018. Improving implicit discourse relation classification by modeling inter-dependencies of discourse units in a paragraph. In Proceedings of NAACL-HLT 2018 (Long Papers), pages 141-151, New Orleans, Louisiana.
+ Zeyu Dai and Ruihong Huang. 2019. A regularization approach for incorporating event knowledge and coreference relations into neural discourse parsing. In Proceedings of EMNLP-IJCNLP 2019, pages 2976-2987, Hong Kong, China.
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL-HLT 2019 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota.
+ Zujun Dou, Yu Hong, Yu Sun, and Guodong Zhou. 2021. CVAE-based re-anchoring for implicit discourse relation classification. In Findings of EMNLP 2021, pages 1275-1283, Punta Cana, Dominican Republic.
+ Tianyu Gao, Xingcheng Yao, and Danqi Chen. 2021. SimCSE: Simple contrastive learning of sentence embeddings. In Proceedings of EMNLP 2021, pages 6894-6910.
+ Beliz Gunel, Jingfei Du, Alexis Conneau, and Veselin Stoyanov. 2021. Supervised contrastive learning for pre-trained language model fine-tuning. In ICLR.
+ Fengyu Guo, Ruifang He, Jianwu Dang, and Jian Wang. 2020. Working memory-driven neural networks with a novel knowledge enhancement paradigm for implicit discourse relation recognition. In AAAI.
+ Yangfeng Ji and Jacob Eisenstein. 2015. One vector is not enough: Entity-augmented distributed semantics for discourse relations. Transactions of the Association for Computational Linguistics, 3:329-344.
+ Feng Jiang, Yaxin Fan, Xiaomin Chu, Peifeng Li, and Qiaoming Zhu. 2021. Not just classification: Recognizing implicit discourse relation on joint modeling of classification and generation. In Proceedings of EMNLP 2021, pages 2418-2431.
+ Robinson Joshua, Chuang Ching-Yao, Sra Suvrit, and Jegelka Stefanie. 2021. Contrastive learning with hard negative samples. In ICLR.
+ Prannay Khosla, Piotr Teterwak, Chen Wang, Aaron Sarna, Yonglong Tian, Phillip Isola, Aaron Maschinot, Ce Liu, and Dilip Krishnan. 2020. Supervised contrastive learning. In Advances in Neural Information Processing Systems 33, pages 18661-18673.
+ Taeuk Kim, Kang Min Yoo, and Sang-goo Lee. 2021. Self-guided contrastive learning for BERT sentence representations. In Proceedings of ACL-IJCNLP 2021 (Long Papers), pages 2528-2540.
+ Diederick P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In ICLR.
+ Yudai Kishimoto, Yugo Murawaki, and Sadao Kurohashi. 2020. Adapting BERT to implicit discourse relation classification with a focus on discourse connectives. In LREC.
+ Man Lan, Jianxiang Wang, Yuanbin Wu, Zheng-Yu Niu, and Haifeng Wang. 2017. Multi-task attention-based neural networks for implicit discourse relationship representation and identification. In Proceedings of EMNLP 2017, pages 1299-1308, Copenhagen, Denmark.
+ Li Liang, Zheng Zhao, and Bonnie Webber. 2020. Extending implicit discourse relation recognition to the PDTB-3. In Proceedings of the First Workshop on Computational Approaches to Discourse, pages 135-147.
+ Xin Liu, Jiefu Ou, Yangqiu Song, and Xin Jiang. 2020. On the importance of word and sentence representation learning in implicit discourse relation classification. In Proceedings of IJCAI-20, pages 3830-3836.
+ Yang Liu and Sujian Li. 2016. Recognizing implicit discourse relations via repeated reading: Neural networks with multi-level attention. In Proceedings of EMNLP 2016, pages 1224-1233, Austin, Texas.
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. CoRR, abs/1907.11692.
+ Wanqiu Long, Bonnie Lynn Webber, and Deyi Xiong. 2020. TED-CDB: A large-scale Chinese discourse relation dataset on TED talks. In EMNLP.
+ Thomas Meyer, Najeh Hajlaoui, and Andrei Popescu-Belis. 2015. Disambiguating discourse connectives for statistical machine translation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23:1-1.
+ Jirí Mírovský, Pavlína Jínová, and Lucie Poláková. 2014. Discourse relations in the Prague Dependency Treebank 3.0. In COLING.
+ Marc Moens and Mark Steedman. 1988. Temporal ontology and temporal reference. Computational Linguistics, 14(2):15-28.
+ Linh The Nguyen, Ngo Van Linh, Khoat Than, and Thien Huu Nguyen. 2019. Employing the correspondence of relations and connectives to identify implicit discourse relations via label embeddings. In ACL.
+ Silvia Pareti and Irina Prodanof. 2010. Annotating attribution relations: Towards an Italian discourse treebank. In Proceedings of LREC'10, Valletta, Malta.
+ Gary Patterson and Andrew Kehler. 2013. Predicting the presence of discourse connectives. In EMNLP.
+ Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word representations. In Proceedings of NAACL-HLT 2018 (Long Papers), pages 2227-2237, New Orleans, Louisiana.
+ Rashmi Prasad, Nikhil Dinesh, Alan Lee, Eleni Miltsakaki, Livio Robaldo, Aravind Joshi, and Bonnie Webber. 2008. The Penn Discourse TreeBank 2.0. In Proceedings of LREC'08, Marrakech, Morocco.
+ Lianhui Qin, Zhisong Zhang, and Hai Zhao. 2016. Shallow discourse parsing using convolutional neural network. In Proceedings of the CoNLL-16 shared task, pages 70-77, Berlin, Germany.
+ Lianhui Qin, Zhisong Zhang, Hai Zhao, Zhiting Hu, and Eric Xing. 2017. Adversarial connective-exploiting networks for implicit discourse relation classification. In Proceedings of ACL 2017 (Long Papers), pages 1006-1017, Vancouver, Canada.
+ Huibin Ruan, Yu Hong, Yang Xu, Zhen Huang, Guodong Zhou, and Min Zhang. 2020. Interactively-propagative attention learning for implicit discourse relation recognition. In Proceedings of COLING 2020, pages 3168-3178.
+ Attapol Rutherford and Nianwen Xue. 2015. Improving the inference of implicit discourse relations via classifying explicit discourse connectives. In Proceedings of NAACL-HLT 2015, pages 799-808, Denver, Colorado.
+ Florian Schroff, Dmitry Kalenichenko, and James Philbin. 2015. FaceNet: A unified embedding for face recognition and clustering. In CVPR 2015, pages 815-823.
+ Wei Shi and Vera Demberg. 2019a. Learning to explicitate connectives with Seq2Seq network for implicit discourse relation classification. In Proceedings of IWCS 2019 (Long Papers), pages 188-199, Gothenburg, Sweden.
+ Wei Shi and Vera Demberg. 2019b. Next sentence prediction helps implicit discourse relation classification within and across domains. In Proceedings of EMNLP-IJCNLP 2019, pages 5790-5796, Hong Kong, China.
+ Varsha Suresh and Desmond C. Ong. 2021. Not all negatives are equal: Label-aware contrastive loss for fine-grained text classification. In EMNLP.
+ Jialong Tang, Hongyu Lin, Meng Liao, Yaojie Lu, Xianpei Han, Le Sun, Weijian Xie, and Jin Xu. 2021. From discourse to narrative: Knowledge projection for event relation extraction. In Proceedings of ACL-IJCNLP 2021 (Long Papers), pages 732-742.
+ Dong Wang, Ning Ding, Piji Li, and Haitao Zheng. 2021. CLINE: Contrastive learning with semantic negative examples for natural language understanding. In Proceedings of ACL-IJCNLP 2021 (Long Papers), pages 2332-2342.
+ Bonnie Webber, Rashmi Prasad, Alan Lee, and Aravind Joshi. 2019. The Penn Discourse Treebank 3.0 annotation manual.
+ Changxing Wu, Liuwen Cao, Yubin Ge, Yang Liu, Min Zhang, and Jinsong Su. 2022. A label dependence-aware sequence generation model for multi-level implicit discourse relation recognition. In AAAI.
+ Changxing Wu, Chaowen Hu, Ruochen Li, Hongyu Lin, and Jinsong Su. 2020a. Hierarchical multi-task learning with CRF for implicit discourse relation recognition. Knowledge-Based Systems, 195:105637.
+ Zhuofeng Wu, Sinong Wang, Jiatao Gu, Madian Khabsa, Fei Sun, and Hao Ma. 2020b. CLEAR: Contrastive learning for sentence representation. ArXiv, abs/2012.15466.
+ Wei Xiang, Bang Wang, Lu Dai, and Yijun Mo. 2022. Encoding and fusing semantic connection and linguistic evidence for implicit discourse relation recognition. In Findings of ACL 2022, pages 3247-3257, Dublin, Ireland.
+ Yuanmeng Yan, Rumei Li, Sirui Wang, Fuzheng Zhang, Wei Wu, and Weiran Xu. 2021. ConSERT: A contrastive framework for self-supervised sentence representation transfer. In Proceedings of ACL-IJCNLP 2021 (Long Papers), pages 5065-5075.
+ Seonghyeon Ye, Jiseon Kim, and Alice Oh. 2021. Efficient contrastive learning via novel data augmentation and curriculum learning. In Proceedings of EMNLP 2021, pages 1832-1838.
+ Deniz Zeyrek and Murathan Kurfalı. 2017. TDB 1.1: Extensions on Turkish discourse bank. In Proceedings of the 11th Linguistic Annotation Workshop, pages 76-81, Valencia, Spain.
+ Deniz Zeyrek, Amalia Mendes, Yulia Grishina, Murathan Kurfali, Samuel Gibbon, and Maciej Ogrodniczuk. 2019. TED Multilingual Discourse Bank (TED-MDB): A parallel corpus annotated in the PDTB style. Language Resources and Evaluation, pages 1-38.
+ Dejiao Zhang, Shang-Wen Li, Wei Xiao, Henghui Zhu, Ramesh Nallapati, Andrew O. Arnold, and Bing Xiang. 2021. Pairwise supervised contrastive learning of sentence representations. In Proceedings of EMNLP 2021, pages 5786-5798.
+ A Appendix
+ A.1 PDTB Hierarchy
+ The hierarchies of both PDTB 2.0 and PDTB 3.0 consist of three levels, but for implicit
+ relation recognition no classification of third-level labels has been done so far; we too
+ focus on the hierarchy between level-1 and level-2. The PDTB-3 relation hierarchy simplifies
+ and extends the PDTB-2 relation hierarchy: it not only simplifies the PDTB-2 hierarchy by
+ restricting Level-3 relations to differences in directionality and by eliminating rare and/or
+ difficult-to-annotate senses, but also augments the relation hierarchy. Figure 6 and Figure 7
+ show the PDTB 2.0 relation hierarchy and the PDTB 3.0 relation hierarchy respectively.
+ [Figures 6 and 7 here: diagrams of the two sense hierarchies; their label lists appear as
+ extraction residue at the end of this document.]
+ Figure 6: The PDTB 2.0 Senses Hierarchy.
+ Figure 7: The PDTB 3.0 Senses Hierarchy. The leftmost column contains the Level-1 senses and
+ the middle column, the Level-2 senses. For asymmetric relations, Level-3 senses are located
+ in the rightmost column.
+ Model                        | Comp. | Cont. | Exp.  | Temp.
+ Liu and Li (2016)            | 29.15 | 63.33 | 65.10 | 41.03
+ Lan et al. (2017)            | 30.10 | 60.91 | 64.03 | 33.71
+ Ruan et al. (2020)           | 30.37 | 61.95 | 64.28 | 34.74
+ Chen et al. (2016b)          | 27.34 | 62.56 | 64.71 | 38.91
+ Xiang et al. (2022) (BiLSTM) | 34.16 | 65.48 | 67.82 | 40.22
+ Xiang et al. (2022) (BERT)   | 35.83 | 66.77 | 70.00 | 42.13
+ Ours                         | 63.30 | 78.60 | 79.91 | 58.39
+ Table 9: The results of different relations on PDTB-3 in terms of F1 (%) (top-level
+ multi-class classification).
+ Second-level Label  | Ours
+ Temp.Asynchronous   | 66.35
+ Temp.Synchrony      | 41.38
+ Cont.Cause          | 71.38
+ Cont.Cause+Belief   | 0.00
+ Cont.Condition      | 74.07
+ Cont.Purpose        | 96.05
+ Comp.Contrast       | 56.91
+ Comp.Concession     | 60.11
+ Exp.Conjunction     | 61.70
+ Exp.Equivalence     | 11.43
+ Exp.Instantiation   | 69.83
+ Exp.Level-of-detail | 55.34
+ Exp.Manner          | 78.43
+ Exp.Substitution    | 63.77
+ Table 10: The results of different relations on PDTB-3 in terms of F1 (%) (second-level
+ multi-class classification).
+ A.2 The results on relation types on PDTB-3
+ We also examine the classification performance on PDTB-3 in terms of Macro-F1 for the four
+ main relation types at level-1 and the 14 sense types at level-2. The results can be seen in
+ Table 9 and Table 10. Our model has significantly better performance for all level-1
+ relations. As for the level-2 sense types, because there are no results from previous
+ systems, we simply report the F1 of the 14 level-2 sense types in PDTB-3.
+ [Extraction residue from Figures 6 and 7: the flattened label lists of the two sense
+ hierarchies, e.g. Temporal (Synchronous; Asynchronous: precedence, succession), Contingency
+ (Cause: reason, result; Condition; Purpose; ...), Comparison (Contrast; Concession;
+ Similarity), and Expansion (Conjunction; Instantiation; Restatement/Level-of-detail;
+ Alternative/Substitution; Exception; Manner; List).]
XNE0T4oBgHgl3EQf3QLX/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
Z9A0T4oBgHgl3EQfF_-o/content/tmp_files/2301.02041v1.pdf.txt ADDED
@@ -0,0 +1,383 @@
+ arXiv:2301.02041v1 [math.RA] 5 Jan 2023
+ A Note On Square-free Commuting Probabilities of Finite Rings
+ Andrew Mendelsohn
+ Department of EEE, Imperial College London, London, SW7 2AZ, United Kingdom.
+ Abstract. It is shown that the commuting probability of a finite ring cannot be a fraction
+ with square-free denominator, resolving a conjecture of Buckley and MacHale.
+ 1 Introduction
+ Let G be a finite group and U denote the uniform distribution. Let the commuting probability
+ of G, P_G, be denoted
+ P_G := Pr_{a,b \leftarrow U(G)}(ab = ba).
+ An alternative characterisation is
+ P_G = \frac{1}{|G|^2} |\{(x, y) \in G^2 : xy = yx\}|.
+ Joseph made the following conjectures, where G is the set of all commuting probabilities of
+ finite groups [1]:
+ A. All limit points of G are rational.
+ B. The set G is well ordered by >.
+ C. The set {0} \cup G is closed (that is, contains all its accumulation points).
+ In [2], Eberhard resolved conjectures A and B, and in [3] conjecture C was resolved.
+ One can extend the definition of commuting probability to finite rings: set
+ P_R = \frac{1}{|R|^2} |\{(x, y) \in R^2 : xy = yx\}| = \frac{1}{|R|^2} |\{(x, y) \in R^2 : xy - yx = 0\}|.
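+ As a quick illustration of the definition (our example, not from the paper), P_R can be
+ computed by brute force for a small noncommutative ring, here the 8-element ring of 2x2
+ upper-triangular matrices over F_2:
+ ```python
+ # Brute-force P_R = |{(x, y) : xy = yx}| / |R|^2 for a small finite ring.
+ from itertools import product
+ 
+ def mat_mul(a, b):
+     # 2x2 matrices over F_2, stored as ((a00, a01), (a10, a11))
+     return tuple(
+         tuple(sum(a[i][k] * b[k][j] for k in range(2)) % 2 for j in range(2))
+         for i in range(2)
+     )
+ 
+ ring = [((x, y), (0, z)) for x, y, z in product(range(2), repeat=3)]
+ commuting = sum(1 for a in ring for b in ring if mat_mul(a, b) == mat_mul(b, a))
+ print(commuting, "/", len(ring) ** 2)  # 40 / 64, i.e. P_R = 5/8
+ ```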
+ In [4], the following conjectures were made, where R is the set PR over all finite
33
+ rings R:
34
+ 1. 1/n ̸∈ R when n ∈ N is square-free.
35
+ 2. R ⊂ G.
36
+ 3. R coincides with the set of values of PG as G ranges over all finite nilpotent
37
+ groups of class at most 2.
38
+ 4. All limit points of R are rational.
42
+ 5. For each 0 < t ≤ 1, there exists ε_t > 0 such that R ∩ (t − ε_t, t) = ∅.
43
+ 6. R does not contain any of its accumulation points.
44
+ Note conjectures 4, 5, and 6 correspond to Joseph’s conjectures. Moreover, con-
45
+ jectures 4 and 5 would follow from the veracity of conjectures 2 or 3, since
46
+ Eberhard showed G has rational limit points and is well-ordered. In [5], conjec-
47
+ ture 2 was in fact resolved, and thus conjectures 4 and 5. Moreover, conjecture
48
+ 3 was partially resolved: the authors obtained that R is a subset of the set of
49
+ values of PG as G ranges over all finite nilpotent groups of class at most 2. We
50
+ conclude that conjectures 1, 3, and 6 are open.
51
+ In this work, we resolve conjecture 1.
52
+ 2
53
+ Preliminaries and Prior Results
54
+ Definition 1. Two finite groups G, H are called isoclinic if G/Z(G) ∼= H/Z(H)
55
+ and G′ ∼= H′, and if the diagram below commutes:
+ [diagram: the commutator maps G/Z(G) × G/Z(G) → G′ and H/Z(H) × H/Z(H) → H′ are compatible with the two isomorphisms]
60
+ Isoclinism preserves nilpotency class and commuting probability [10]. A stem
61
+ group is a group in a given isoclinism class of minimal order. It is well known
62
+ that if G is a stem group, then Z(G) ≤ G′. For more on isoclinism, see [11].
63
+ Below we state existing results in the literature we will need below.
64
+ Lemma 1. [5] R ⊂ G_{n,2}, where G_{n,2} is the set of commuting probabilities of all
+ finite nilpotent groups of class at most 2.
66
+ This statement is proved as follows: let R be a finite ring. We can turn
67
+ R⊕R into a nilpotent ring of class 3 by endowing it with the multiplication rule
68
+ (a, x)(b, y) = (0, ab). This ring can be turned into a nilpotent group GR of class
69
+ at most 2 by endowing it with the binary operation a ◦ b = a + b + ab. Both
70
+ of these transformations preserve the commuting probability. Thus the values of
71
+ R \ {1} are a subset of the values PG, running over nilpotent groups G of class
72
+ equal to 2. Note that if R has size n, then the resulting group GR has order n2,
73
+ and if R is noncommutative then the resulting group is nonabelian.
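+ (Illustration, not from the paper: a numerical check of this construction on the
+ 8-element upper-triangular ring over F_2 used above; both probabilities come out
+ 0.625, as the construction promises.)
+ from itertools import product
+ R = [t for t in product(range(2), repeat=3)]     # (a, b, d) <-> [[a, b], [0, d]] mod 2
+ def mul(x, y):
+     a1, b1, d1 = x; a2, b2, d2 = y
+     return (a1 * a2 % 2, (a1 * b2 + b1 * d2) % 2, d1 * d2 % 2)
+ def add(x, y):
+     return tuple((u + v) % 2 for u, v in zip(x, y))
+ N = [(a, x) for a in R for x in R]               # N = R ⊕ R
+ def nmul(u, v):                                  # (a, x)(b, y) = (0, ab)
+     return ((0, 0, 0), mul(u[0], v[0]))
+ def nadd(u, v):
+     return (add(u[0], v[0]), add(u[1], v[1]))
+ def circle(u, v):                                # group law a ∘ b = a + b + ab
+     return nadd(nadd(u, v), nmul(u, v))
+ p_ring = sum(mul(a, b) == mul(b, a) for a in R for b in R) / len(R) ** 2
+ p_group = sum(circle(u, v) == circle(v, u) for u in N for v in N) / len(N) ** 2
+ print(p_ring, p_group)                           # 0.625 0.625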
74
+ Lemma 2. [7] Pr(G) = \frac{1}{|G'|} \left( 1 + \frac{|G'| − 1}{|G : Z(G)|} \right) if and only if G is nilpotent.
+ Lemma 3. [6] If G is a nilpotent group, then P_G ≠ 1/p.
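+ (Quick check, not from the paper: the quaternion group Q_8 is nilpotent of class 2
+ with |G'| = 2 and |G : Z(G)| = 4, so Lemma 2 gives Pr(G) = (1/2)(1 + 1/4) = 5/8,
+ the well-known value for Q_8.)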
84
+ 3
85
+ Results
86
+ Theorem 1. 1/p ∉ R for all p ∈ N_{≥2}.
92
+ Proof. By Lemma 1, R is contained within the set of commuting probabilities
93
+ of finite nilpotent groups of class at most 2. By Lemma 3, this latter set does
94
+ not contain 1/p for any prime p.
96
+ Denote the set of commuting probabilities of rings of prime power order for
97
+ some prime p by R_p.
98
+ Proposition 1. 1/n ∉ R_p for any prime p ∈ N_{≥2} and n ∈ N_{>1}.
101
+ Proof. By Lemma 1, we need only consider commuting probabilities of finite
102
+ nilpotent groups of class at most 2. By Lemma 2, we know a fortiori the com-
103
+ muting probability of finite nilpotent groups of class at most 2 in terms of derived
104
+ subgroups and centers. Suppose that for some n ∈ N≥2 we have
105
+ \frac{1}{|G'|} \left( 1 + \frac{|G'| − 1}{|G : Z(G)|} \right) = \frac{1}{n}.
+ By the construction of [5] considered above, wlog let |G| = p^e for some even
+ positive integer e. Then |Z(G)| = p^f and |G'| = p^g with 0 < g ≤ f < e (since G is
+ at most class 2). Then
117
+ \frac{1}{n} = p^{−g} \left( 1 + \frac{p^g − 1}{p^{e−f}} \right) = p^{−g} + \frac{p^g − 1}{p^{e−f+g}}   (1)
+ = \frac{p^{e−f} + p^g − 1}{p^{e−f+g}}.   (2)
130
+ For this to hold, some power p^h with h > 0 must divide the numerator; but this
+ cannot happen: if it did, one would have p^{e−f} + p^g − 1 = kp for some integer
+ k ≠ 0. But then −1 ≡ 0 mod p, a contradiction.
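+ (Numerical sanity check of this step, not from the paper; the exponent ranges are
+ an arbitrary sample. Since the numerator is ≡ −1 mod p, the fraction is already in
+ lowest terms and its numerator exceeds 1, so it never equals 1/n.)
+ from fractions import Fraction
+ for p in (2, 3, 5, 7):
+     for e in range(2, 9, 2):                     # |G| = p^e with e even
+         for f in range(1, e):                    # |Z(G)| = p^f
+             for g in range(1, f + 1):            # |G'| = p^g, 0 < g <= f < e
+                 v = Fraction(p ** (e - f) + p ** g - 1, p ** (e - f + g))
+                 assert v.numerator % p != 0 and v.numerator > 1
+ print("no value of the form 1/n in the sampled range")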
133
+ Theorem 2. ℓ/n ∉ R for any squarefree n ∈ N_{>1} and ℓ < n with gcd(ℓ, n) = 1.
136
+ Proof. Any finite ring can be turned into a nilpotent group of class at most
137
+ 2, such that the commuting probability of the ring is equal to the commuting
138
+ probability of the group. The construction (outlined above) turns a commutative
139
+ ring into a group of class 1, and a noncommutative ring into a nonabelian group of
140
+ class at most 2, therefore of class equal to 2. The order of the group is the square
141
+ of the order of the ring, so the Sylow subgroups of the group have order at least
142
+ the square of a prime. Since the group is nilpotent, it can be written as a product
143
+ of its Sylow subgroups, which are all of class at most 2, and the commuting
144
+ probability of the group is the product of the commuting probabilities of its
145
+ Sylow subgroups. Thus it remains to analyse the equation
146
+ ℓ/n = \prod_{i=1}^{m} \frac{p_i^{e_i−f_i} + p_i^{g_i} − 1}{p_i^{e_i−f_i+g_i}},
158
+ for m > 1 and the p_i distinct, where the e_i, f_i, and g_i are as before. Via isoclinism,
+ we may replace G_R by a class two nilpotent (stem) group G with identical com-
160
+ muting probability and minimal order. Thus we may assume that Z(G_R) = G'_R
165
+ (note isoclinism preserves nilpotency class and G_R is class two), and moreover
166
+ that none of the Sylow subgroups are abelian. The above equality simplifies to
167
+ ℓ/n = \prod_{i=1}^{m} \frac{p_i^{e'_i−f'_i} + p_i^{f'_i} − 1}{p_i^{e'_i}},
184
+ where the exponents e'_i, f'_i correspond to the group G. We now proceed by in-
+ duction on the number of prime factors of |G_R|, denoted m.
188
+ By Lemma 14 of [9], if P_{G_R} = ℓ/n in lowest terms, the prime factors of n are
+ precisely the prime factors of |G_R|. If m = 1, it is known that ℓ/n ∉ R_q for any
+ prime q and square-free n (in fact, we know this to hold for m ≤ 69 by Theorem
+ 9 of [4]). Suppose the statement is true up to m = k − 1, and consider the case
+ m = k; suppose, for a contradiction, that the commuting probability is equal to
+ ℓ/n, for some square-free integer n and ℓ ≤ n with gcd(ℓ, n) = 1, and without loss
+ of generality that n has prime factors equal to the set of p_i, i = 1, ..., k:
199
+ ℓ/n = \prod_{i=1}^{k} \frac{p_i^{e'_i−f'_i} + p_i^{f'_i} − 1}{p_i^{e'_i}}.
216
+ Rearrange for the following:
217
+ \frac{ℓ \cdot p_k^{e'_k}}{n \cdot (p_k^{e'_k−f'_k} + p_k^{f'_k} − 1)} = \prod_{i=1}^{k−1} \frac{p_i^{e'_i−f'_i} + p_i^{f'_i} − 1}{p_i^{e'_i}}.
245
+ Writing the left hand side in lowest terms, we have
246
+ \frac{ℓ \cdot p_k^{e''_k}}{n' \cdot (p_k^{e'_k−f'_k} + p_k^{f'_k} − 1)} = \prod_{i=1}^{k−1} \frac{p_i^{e'_i−f'_i} + p_i^{f'_i} − 1}{p_i^{e'_i}},
+ where n' is not divisible by p_k. We have a commuting probability on the right
275
+ hand side with k−1 prime factors; so by the induction hypothesis, the denomina-
+ tor of the left hand side has no square factors. But we also have
+ p_k^{e'_k−f'_k} + p_k^{f'_k} − 1
+ on the denominator of the left hand side, which is not divisible by p_k, and by
+ Lemma 14 of [9] must have prime factors equal to the set of p_i, for i = 1, ..., k−1;
+ moreover, there can be no cancellation between these factors and ℓ, by assump-
+ tion on ℓ. But then for at least one index j, n' \cdot (p_k^{e'_k−f'_k} + p_k^{f'_k} − 1) has a prime
+ factor p_j with multiplicity at least two, which is a contradiction.
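+ (Quick check of Lemma 14's constraint on the toy example above, not from the
+ paper: there P_{G_R} = 5/8 and |G_R| = 64 = 2^6, and the denominator 8 and the
+ group order share exactly the prime 2.)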
294
+ Remark 1. Since 1/p is an accumulation point of R_p, 1/n is an accumulation point
+ of R for all n. The above result thus means that many accumulation points of
298
+ R are not contained in R. As well as resolving the first conjecture stated at the
299
+ beginning of this note, the result also makes progress on the sixth conjecture
300
+ stated.
304
+ References
305
+ 1. Joseph, K. Several Conjectures on Commutativity in Algebraic Structures. The
+ American Mathematical Monthly. 84, 550-551 (1977),
+ https://doi.org/10.1080/00029890.1977.11994411.
+ 2. Eberhard, S. Commuting probabilities of finite groups. Bulletin Of The London
+ Mathematical Society. 47, 796-808 (2015).
+ 3. Browning, T. Limit Points of Commuting Probabilities of Finite Groups. (arXiv,
+ 2022), https://arxiv.org/abs/2201.09402.
+ 4. Buckley, S. & MacHale, D. Contrasting the commuting probabilities of groups
+ and rings. (arXiv, 2014),
+ https://archive.maths.nuim.ie/staff/sbuckley/Papers/bm_g-vs-r.pdf.
+ 5. Juráš, M. & Ursul, M. On commuting probabilities in finite groups and rings.
+ Journal Of Algebra Combinatorics Discrete Structures And Applications. (2022),
+ https://jacodesmath.com/index.php/jacodesmath/article/view/148.
+ 6. Castelaz, A. Commutativity Degree of Finite Groups. (Wake Forest University,
+ 2010), https://books.google.co.uk/books?id=QYxBnQAACAAJ.
+ 7. Nath, R. & Das, A. On a lower bound of commutativity degree. Rendiconti Del
+ Circolo Matematico Di Palermo. 59 pp. 137-142 (2010).
+ 8. Buckley, S., MacHale, D. & Shé, A. Finite rings with many commuting pairs
+ of elements. (2014), https://archive.maths.nuim.ie/staff/sbuckley/Papers/bms.pdf.
+ 9. Buckley, S. & MacHale, D. Groups with Pr(G) = 1/3,
+ https://archive.maths.nuim.ie/staff/sbuckley/Papers/bm_GpCP_1_3.pdf.
+ 10. Lescot, P. Isoclinism Classes and Commutativity Degrees of Finite Groups.
+ Journal Of Algebra. 177, 847-869 (1995).
+ 11. Berkovich, Y. Groups of Prime Power Order, Volume 1. (De Gruyter, 2008),
+ https://doi.org/10.1515/9783110208221.
383
+
Z9A0T4oBgHgl3EQfF_-o/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,189 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf,len=188
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
3
+ page_content='02041v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
4
+ page_content='RA] 5 Jan 2023 A Note On Square-free Commuting Probabilities of Finite Rings Andrew Mendelsohn Department of EEE, Imperial College London, London, SW7 2AZ, United Kingdom.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
5
+ page_content=' andrew.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
6
+ page_content='mendelsohn18@imperial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
7
+ page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
8
+ page_content='uk Abstract.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
9
+ page_content=' It is shown that the commuting probability of a finite ring cannot be a fraction with square-free denominator, resolving a conjecture of Buckley and MacHale.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
10
+ page_content=' 1 Introduction Let G be a finite group and U denote the uniform distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
11
+ page_content=' Let the commut- ing probability of G, PG, be denoted PG := Pra,b←U(G)(ab = ba).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
12
+ page_content=' An alternative characterisation is PG = 1 |G|2 {(x, y) ∈ G2 : xy = yx}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
13
+ page_content=' Joseph made the following conjectures, where G is the set of all commuting probabilities of finite groups [1]: A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
14
+ page_content=' All limit points of G are rational.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
15
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
16
+ page_content=' The set G is well ordered by >.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
17
+ page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
18
+ page_content=' The set 0 ∪ G is closed (that is, contains all its accumulation points).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
19
+ page_content=' In [2], Eberhard resolved conjectures A and B, and in [3] conjecture C was resolved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
20
+ page_content=' One can extend the definition of commuting probability to finite rings: set PR = 1 |R|2 |{(x, y) ∈ R2 : xy = yx}| = 1 |R|2 |{(x, y) ∈ R2 : xy − yx = 0}|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
21
+ page_content=' In [4], the following conjectures were made, where R is the set PR over all finite rings R: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
22
+ page_content=' 1/n ̸∈ R when n ∈ N is square-free.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
23
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
24
+ page_content=' R ⊂ G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
25
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
26
+ page_content=' R coincides with the set of values of PG as G ranges over all finite nilpotent groups of class at most 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
27
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
28
+ page_content=' All limit points of R are rational.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
29
+ page_content=' 2 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
30
+ page_content=' Mendelsohn 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
31
+ page_content=' For each 0 < t ≤ 1, there exists ǫt > 0 such that R ∩ (t − ǫt, t) = ∅.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
32
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
33
+ page_content=' R does not contain any of its accumulation points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
34
+ page_content=' Note conjectures 4, 5, and 6 correspond to Joseph’s conjectures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
35
+ page_content=' Moreover, con- jectures 4 and 5 would follow from the veracity of conjectures 2 or 3, since Eberhard showed G has rational limit points and is well-ordered.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
36
+ page_content=' In [5], conjec- ture 2 was in fact resolved, and thus conjectures 4 and 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
37
+ page_content=' Moreover, conjecture 3 was partially resolved: the authors obtained that R is a subset of the set of values of PG as G ranges over all finite nilpotent groups of class at most 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
38
+ page_content=' We conclude that conjectures 1, 3, and 6 are open.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
39
+ page_content=' In this work, we resolve conjecture 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
40
+ page_content=' 2 Preliminaries and Prior Results Definition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
41
+ page_content=' Two finite groups G, H are called isoclinic if G/Z(G) ∼= H/Z(H) and G′ ∼= H′, and if the diagram below commutes: G/Z(G) × G/Z(G) H/Z(H) × H/Z(H) G′ H′ Isoclinism preserves nilpotency class and commuting probability [10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
42
+ page_content=' A stem group is a group in a given isoclinism class of minimal order.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
43
+ page_content=' It is well known that if G is a stem group, then Z(G) ≤ G′.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
44
+ page_content=' For more on isoclinism, see [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
45
+ page_content=' Below we state existing results in the literature we will need below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
46
+ page_content=' Lemma 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
47
+ page_content=' [5] R ⊂ Gn,2, where Gn,2 is the set of commuting probabilities of all finite nilpotent groups of class at most 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
48
+ page_content=' This statement is proved as follows: let R be a finite ring.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
49
+ page_content=' We can turn R⊕R into a nilpotent ring of class 3 by endowing it with the multiplication rule (a, x)(b, y) = (0, ab).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
50
+ page_content=' This ring can be turned into a nilpotent group GR of class at most 2 by endowing it with the binary operation a ◦ b = a + b + ab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
51
+ page_content=' Both of these transformations preserve the commuting probability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
52
+ page_content=' Thus the values of R \\ {1} are a subset of the values PG, running over nilpotent groups G of class equal to 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
53
+ page_content=' Note that if R has size n, then the resulting group GR has order n2, and if R is noncommutative then the resulting group is nonabelian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
54
+ page_content=' Lemma 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
55
+ page_content=' [7] Pr(G) = 1 |G′| � 1 + |G′|−1 |G:Z(G)| � if and only if G is nilpotent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
56
+ page_content=' Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
57
+ page_content=' [6] If G is a nilpotent group, then PG ̸= 1 p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
58
+ page_content=' 3 Results Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
59
+ page_content=' 1 p ̸∈ R for all p ∈ N≥2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
60
+ page_content=' On the Commuting Probability of Finite Rings 3 Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
61
+ page_content=' By Lemma 1, R is contained within the set of commuting probabilities of finite nilpotent groups of class at most 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
62
+ page_content=' By Lemma 3, this latter set does not contain 1 p for any prime p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
63
+ page_content=' Denote the set of commuting probabilities of rings of prime power order for some prime p by Rp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
64
+ page_content=' Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
65
+ page_content=' 1 n ̸∈ Rp for any prime p ∈ N≥2 and n ∈ N>1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
66
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
67
+ page_content=' By Lemma 1, we need only consider commuting probabilities of finite nilpotent groups of class at most 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
68
+ page_content=' By Lemma 2, we know a fortiori the com- muting probability of finite nilpotent groups of class at most 2 in terms of derived subgroups and centers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
69
+ page_content=' Suppose that for some n ∈ N≥2 we have 1 |G′| � 1 + |G′| − 1 |G : Z(G)| � = 1 n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
70
+ page_content=' By the construction of [5] considered above, wlog let |G| = pe for some even positive integer e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
71
+ page_content=' Then Z(G) = pf and G′ = pg with 0 < g ≤ f < e (since G is at most class 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
72
+ page_content=' Then 1 n = p−g � 1 + pg − 1 pe−f � = p−g + pg − 1 pe−f+g (1) = pe−f + pg − 1 pe−f+g .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
73
+ page_content=' (2) For this to hold, some power ph must divide the numerator, with h > 0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
74
+ page_content=' but this cannot hold;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
75
+ page_content=' for if so, then one must have pe−f + pg − 1 = kp, for some integer k ̸= 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
76
+ page_content=' But then −1 ≡ 0 mod p, a contradiction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
77
+ page_content=' Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
78
+ page_content=' ℓ n ̸∈ R for any squarefree n ∈ N>1 and ℓ < n with gcd(ℓ, n) = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
79
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
80
+ page_content=' Any finite ring can be turned into a nilpotent group of class at most 2, such that the commuting probability of the ring is equal to the commuting probability of the group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
81
+ page_content=' The construction (outlined above) turns a commutative ring into a group of class 1, and a noncommutative ring into a nonabelian group of class at most 2, therefore of class equal to 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
82
+ page_content=' The order of the group is the square of the order of the ring, so the Sylow subgroups of the group have order at least the square of a prime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
83
+ page_content=' Since the group is nilpotent, it can be written as a product of its Sylow subgroups, which are all of class at most 2, and the commuting probability of the group is the product of the commuting probabilities of its Sylow subgroups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
84
+ page_content=' Thus it remains to analyse the equation ℓ n = m � i=1 pei−fi i + pgi i − 1 pei−fi+gi i , for m > 1 and the pi distinct, where the ei, fi, and gi are as before.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
85
+ page_content=' Via isoclinism, we may replace GR by a class two nilpotent (stem) group G with identical com- muting probability and minimal order.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
86
+ page_content=' Thus we may assume that Z(GR) = G′ R 4 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
87
+ page_content=' Mendelsohn (note isoclinism preserves nilpotency class and GR is class two), and moreover that none of the Sylow subgroups are abelian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
88
+ page_content=' The above equality simplifies to ℓ n = m � i=1 pe′ i−f ′ i i + pf ′ i i − 1 p e′ i i , where the exponents e′ i, f ′ i correspond to the group G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
89
+ page_content=' We now proceed by in- duction on the number of prime factors of |GR|, denoted m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
90
+ page_content=' By Lemma 14 of [9], if PGR = ℓ n in lowest terms, the prime factors of n are precisely the prime factors of |GR|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
91
+ page_content=' If m = 1, it is known that ℓ n ̸∈ Rq for any prime q and square-free n (in fact, we know this to hold for m ≤ 69 by Theorem 9 of [4]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
92
+ page_content=' Suppose the statement is true up to n = k − 1, and consider the case n = k;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
93
+ page_content=' suppose, for a contradiction, that the commuting probability is equal to ℓ n, for some square-free integer n and ℓ ≤ n with gcd(ℓ, n) = 1, and without loss of generality that n has prime factors equal to the set of pi, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
94
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
95
+ page_content=', k: ℓ n = k � i=1 pe′ i−f ′ i i + pf ′ i i − 1 p e′ i i .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
96
+ page_content=' Rearrange for the following: ℓ · pe′ k k n · (p e′ k−f ′ k k + p f ′ k k − 1) = k−1 � i=1 pe′ i−f ′ i i + pf ′ i i − 1 p e′ i i .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
97
+ page_content=' Writing the left hand side in lowest terms, we have ℓ · pe′′ k k n′ �� (p e′ k−f ′ k k + p f ′ k k − 1) = k−1 � i=1 pe′ i−f ′ i i + pf ′ i i − 1 p e′ i i , where n′ is not divisible by pk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
98
+ page_content=' We have a commuting probability on the right hand side with k−1 prime factors;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
99
+ page_content=' so by the induction hypothesis, the denomina- tor of the left hand side has no square factors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
100
+ page_content=' But we also have pe′ k−f ′ k k + pf ′ k k − 1 on the denominator of the left hand side, which is not divisible by pk, and by Lemma 14 of [9] must have prime factors equal to the set of pi, for i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
101
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
102
+ page_content=', k−1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
103
+ page_content=' moreover, there can be no cancellation between these factors and ℓ, by assump- tion on ℓ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
104
+ page_content=' But then for at least one index j, n′ · (pe′ k−f ′ k k + pf ′ k k − 1) has a prime factor pj with multiplicity at least two, which is a contradiction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
105
+ page_content=' Remark 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
106
+ page_content=' Since 1 p is an accumulation point of Rp, 1 n is an accumulation point of R for all n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
107
+ page_content=' The above result thus means that many accumulation points of R are not contained in R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
108
+ page_content=' As well as resolving the first conjecture stated at the beginning of this note, the result also makes progress on the sixth conjecture stated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
109
+ page_content=' On the Commuting Probability of Finite Rings 5 References 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
110
+ page_content=' Joseph, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
111
+ page_content=' Several Conjectures on Commutativity in Algebraic Struc- tures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
112
+ page_content=' The American Mathematical Monthly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
113
+ page_content=' 84, 550-551 (1977), https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
114
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
115
+ page_content='1080/00029890.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
116
+ page_content='1977.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
117
+ page_content='11994411.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
118
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
119
+ page_content=' Eberhard, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
120
+ page_content=' Commuting probabilities of finite groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
121
+ page_content=' Bulletin Of The London Mathematical Society.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
122
+ page_content=' 47, 796-808 (2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
123
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
124
+ page_content=' Browning, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
125
+ page_content=' Limit Points of Commuting Probabilities of Finite Groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
126
+ page_content=' (arXiv, 2022), https://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
127
+ page_content='org/abs/2201.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
128
+ page_content='09402.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
129
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
130
+ page_content=' Buckley, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
131
+ page_content=' & MacHale, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
132
+ page_content=' Contrasting the commut- ing probabilities of groups and rings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
133
+ page_content=' (arXiv, 2014), https://archive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
134
+ page_content='maths.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
135
+ page_content='nuim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
136
+ page_content='ie/staff/sbuckley/Papers/bm_g-vs-r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
137
+ page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
138
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
139
+ page_content=' Jur´a˘s, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
140
+ page_content=' & Ursul, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
141
+ page_content=' On commuting probabilities in finite groups and rings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
142
+ page_content=' Journal Of Algebra Combinatorics Discrete Structures And Applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
143
+ page_content=' (2022), https://jacodesmath.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
144
+ page_content='com/index.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
145
+ page_content='php/jacodesmath/article/view/148.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
146
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
147
+ page_content=' Castelaz, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
148
+ page_content=' Commutativity Degree of Finite Groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
149
+ page_content=' (Wake Forest University, 2010), https://books.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
150
+ page_content='google.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
151
+ page_content='co.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
152
+ page_content='uk/books?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
153
+ page_content='id=QYxBnQAACAAJ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
154
+ page_content=' 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
155
+ page_content=' Nath, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
156
+ page_content=' & Das, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
157
+ page_content=' On a lower bound of commutativity degree.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
158
+ page_content=' Rendiconti Del Circolo Matematico Di Palermo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
159
+ page_content=' 59 pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
160
+ page_content=' 137-142 (2010, 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
161
+ page_content=' 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
162
+ page_content=' Buckley, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
163
+ page_content=', MacHale, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
164
+ page_content=' & Sh´e, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
165
+ page_content=' Finite rings with many commuting pairs of elements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
166
+ page_content=' (2014), https://archive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
167
+ page_content='maths.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
168
+ page_content='nuim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
169
+ page_content='ie/staff/sbuckley/Papers/bms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
170
+ page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
171
+ page_content=' 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
172
+ page_content=' Buckley, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
173
+ page_content=' & MacHale, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
174
+ page_content=' Groups with Pr(G) = 1/3, https://archive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
175
+ page_content='maths.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
176
+ page_content='nuim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
177
+ page_content='ie/staff/sbuckley/Papers/bm_GpCP_1_3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
178
+ page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
179
+ page_content=' 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
180
+ page_content=' Lescot, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
181
+ page_content=' Isoclinism Classes and Commutativity Degrees of Finite Groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
182
+ page_content=' Journal Of Algebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
183
+ page_content=' 177, 847-869 (1995).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
184
+ page_content=' 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
185
+ page_content=' Berkovich, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
186
+ page_content=' Groups of Prime Power Order, Volume 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
187
+ page_content=' (De Gruyter, 2008), https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
188
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
189
+ page_content='1515/9783110208221.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Z9A0T4oBgHgl3EQfF_-o/content/2301.02041v1.pdf'}
_9E1T4oBgHgl3EQfDAK2/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e37c74e3d625432c9d7ea83dcef86036d515bd41410ba3a589636f185d7db467
3
+ size 1114157
aNE4T4oBgHgl3EQfnw2m/content/tmp_files/2301.05179v1.pdf.txt ADDED
@@ -0,0 +1,574 @@
1
+ Laser Field Initiation of Higher Order Poles of S-Matrix - Optical Realization of
2
+ Field Theoretic Models*
3
+ G.S. Agarwal1,2
4
+ 1Physical Research Laboratory, Navrangpura, Ahmedabad, India
5
+ 2Jawaharlal Nehru Center for Advanced Scientific Research, Bangalore, India
6
+ We discuss the possibility of converting a simple pole in the radiative decay of a state into a pole of
7
+ higher order by using resonant electromagnetic fields. This process of creation of higher order pole
8
+ is controllable by the intensity of the laser field. We use density matrix and Liouville space and
9
+ present the modification of the Lorentzian line shapes (Breit-Wigner formula) for example to ones
10
+ involving square of Lorentzian and derivatives of Lorentzians.
11
+ INTRODUCTION
12
+ In a classic paper Goldberger and Watson [1] con-
13
+ sidered the possibility that the decay law for an un-
14
+ stable particle can be more complex than a simple
15
+ exponential. They showed the possibility of the ex-
16
+ istence of the poles of S-matrix which were not nec-
17
+ essarily simple poles. Since then, higher order poles
18
+ have been extensively studied. Recently, there is re-
19
+ vival [2,3] of interest in such studies and in particular
20
+ Bhamathi and Sudarshan have analyzed several field
21
+ theoretic models like Friedrich-Lee model, cascade
22
+ model and their extensions. They examine the spec-
23
+ trum (complex) of eigenvalues for such models. A
24
+ related question is how the Breit-Wigner line shape
25
+ formula is modified if the S-matrix possesses higher order
26
+ poles.
27
+ In this paper we examine the possibility of cre-
28
+ ation of the higher order poles using laser fields. We
29
+ consider the decay of say excited state of an atom.
30
+ Normally this decay is described by the Wigner-
31
+ Weisskopf theory which leads to exponential decay
32
+ law.
33
+ We next discuss the case when the excited
34
+ state is coupled to another state by a resonant elec-
35
+ tromagnetic field. In such a case we show that for
36
+ appropriate value of the intensity of the laser field
37
+ the corresponding spectral function has a pole of or-
38
+ der two. We calculate the resulting line shape and
39
+ discuss the line narrowing etc. We emphasize that
40
+ we work within the framework of density matrices
41
+ and hence we work in Liouville space rather than
42
+ in Hilbert space. We present optical realization of
43
+ various field theoretic models.
44
+ Consider the decay of the state |1⟩ into the states
45
+ |3⟩ and |2⟩ at the rates 2γ1 and 2γ2 respectively as
46
+ shown in Fig. 1 (with G_l = 0, ∆_l = 0).
47
+ It is well
48
+ known that the rate of decay of the population in
49
+ |1⟩ is given by
50
+ ρ_{11}(t) = ρ_{11}(0) exp(−2(γ_1 + γ_2)t).   (1)
52
+ Here ρ is the density matrix of the atom. The spec-
53
+ trum of the spontaneously emitted photons will con-
54
+ sist of two Lorentzians centered at ω13 and ω12 with
55
+ a half width (γ1 + γ2). Let us concentrate on the
56
+ emission on the transition |1⟩ ↔ |3⟩. The spectrum
57
+ will be described by the well-known form
58
+ S(ω) = \frac{γ_1/π}{(γ_1 + γ_2)^2 + (ω − ω_{13})^2}.   (2)
62
+ Note that γ2 will be zero if the decay channel |1⟩ →
63
+ |2⟩ is not allowed.
64
+ We will discuss how the laser
65
+ fields could be used to modify significantly the re-
66
+ sults predicted by (1) and (2).
67
+ LIOUVILLE SPACE FORMULATION OF
68
+ DECAY
69
+ We next recall how the spectrum is calculated in
70
+ the density matrix framework [4]. We have included
71
+ this material for completeness so that our discussion
72
+ in subsequent sections can be followed by the non-
73
+ Quantum optics practitioners.
74
+ Consider a system
75
+ with two states |1⟩ and |3⟩ interacting with the vac-
76
+ uum of the electromagnetic field. The Hamiltonian
77
+ can be written in the form
78
+ H = ħω_{13} |1⟩⟨1| + \sum_{ks} ħω_{ks} a†_{ks} a_{ks} + V_{13},
+ V_{13} = \sum_{ks} (ħ g_{ks} a†_{ks} |1⟩⟨3| + h.c.).   (3)
89
+ The vacuum modes are characterized by the propagation index k⃗ and the
+ polarization index s. The a_{ks}, a†_{ks} represent annihilation and creation
+ operators for the mode k⃗s. The V_{13} describes the decay of |1⟩ to |3⟩. The
+ g_{ks} is the coupling constant between the field mode and the atom.
96
+ We use the
97
+ weak coupling assumption and the flat nature of the
98
+ density of states of the electromagnetic vacuum to
99
+ eliminate the degrees of freedom associated with the
100
+ field vacuum. We derive an equation for the den-
101
+ sity matrix of the atomic system alone which can be
102
+ written in the form
103
+ ∂ρ/∂t = Lρ   (4)
106
+ or in terms of the components as
107
+ ρ̇_{11} = −2γ_1 ρ_{11},
+ ρ̇_{13} = −iω_{13} ρ_{13} − γ_1 ρ_{13},
+ ρ̇_{33} = 2γ_1 ρ_{11},
+ etc., 2γ_1 = \sum_{ks} |g_{ks}|^2 δ(ω_{13} − ω_{ks}).   (5)
115
+ This yields steady state as well as transient behavior.
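+ (Illustration, not from the paper; γ1 = 0.5, ω13 = 2.0 and the initial state are
+ arbitrary. The sketch propagates the components (ρ11, ρ13, ρ33) of eq. (5) with a
+ matrix exponential and recovers ρ11(t) = ρ11(0) e^{−2γ1 t}.)
+ import numpy as np
+ g1, w13, t = 0.5, 2.0, 1.3
+ L = np.array([[-2 * g1, 0, 0],                   # d/dt rho_11
+               [0, -1j * w13 - g1, 0],            # d/dt rho_13
+               [2 * g1, 0, 0]], dtype=complex)    # d/dt rho_33
+ rho0 = np.array([1.0, 0.5, 0.0], dtype=complex)
+ vals, vecs = np.linalg.eig(L)                    # L is diagonalizable here
+ rho_t = vecs @ np.diag(np.exp(vals * t)) @ np.linalg.inv(vecs) @ rho0
+ assert np.isclose(rho_t[0].real, np.exp(-2 * g1 * t))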
116
+ The spectrum of radiation is related to the Fourier
117
+ transform of the two time dipole correlation func-
118
+ tion, for example in the above case to
119
+ S(ω) = \frac{1}{π} Re[S(z)|_{z=+iω}],   (6)
+ S(z) ≡ \int_0^∞ dτ e^{−zτ} ⟨A_{13}(t + τ)A_{31}(t)⟩,   A_{13} = A†_{31} = |1⟩⟨3|.   (7)
129
+ The poles of S(z) determine the spectrum. For the
130
+ standard problem S(z) has simple poles.
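+ (Sanity check, not from the paper: assuming the dipole correlation oscillates at
+ ω13 and decays at γ1, S(z) has one simple pole and Re S(z)|_{z=iω}/π is exactly
+ the Lorentzian of eq. (2) with γ2 = 0; the parameters below are arbitrary.)
+ import numpy as np
+ g1, w13 = 0.5, 2.0
+ w = np.linspace(-3.0, 7.0, 2001)
+ S_pole = np.real(1.0 / (g1 + 1j * (w13 - w))) / np.pi    # eq. (6) with a simple pole
+ S_lorentz = (g1 / np.pi) / (g1 ** 2 + (w - w13) ** 2)    # eq. (2), γ2 = 0
+ assert np.allclose(S_pole, S_lorentz)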
131
+ The two time correlation function is calculated
132
+ from the solution of (4) and by using the quantum
133
+ ———————————————————————————————————————————————————————
134
+ *Published in “Frontiers of Quantum Optics and Laser Physics”, p.155-165, ed. S.Y. Zhu, M.S. Zubairy and M.O. Scully
135
+ (Springer, 1997). This work on higher order poles of S matrix has close connection to the exceptional point physics. Thus this
136
+ work brings out how the exceptional point physics in active systems can be manipulated by laser field. See also G. S. Agarwal,
137
+ Quantum Optics, Cambridge University Press, 2012, Section 17.3.1.
138
+ arXiv:2301.05179v1 [physics.optics] 12 Jan 2023
141
+ regression theorem. For completeness, we state what
142
+ it means. We write the solution of (4) as
143
+ ρ_{αβ}(t + τ) = \sum_{m,n} G_{αβ,mn}(τ) ρ_{mn}(t).   (8)
148
+ It should be borne in mind that in the Liouville space
149
+ ραβ is an element of the column matrix.
150
+ We can
151
+ rewrite (8) as
152
+ ⟨A_{βα}(t + τ)⟩ = \sum_{m,n} G_{αβ,mn}(τ) ⟨A_{nm}(t)⟩,   (9)
157
+ then the quantum regression theorem leads to two
158
+ time correlation function:
159
+ ⟨A_{βα}(t + τ)A_{pq}(t)⟩ ≡ \sum_{m,n} G_{αβ,mn}(τ) ⟨A_{nm}(t)A_{pq}(t)⟩
+ = \sum_{m,n} G_{αβ,mn}(τ) ⟨A_{nq}(t)⟩ δ_{mp}
+ = \sum_{m,n} G_{αβ,mn}(τ) δ_{mp} ρ_{qn}(t).   (10)
172
+ On using (10) in (6) it is clear that S(z) is related to
173
+ the Laplace transform of G(τ) or to (z − L)^{−1}. Gen-
+ erally, the Liouvillian matrix relevant for the cal-
175
+ culation of (10) decomposes in block diagonal form
176
+ and only a part of L determines the decay or the
177
+ spectral line shapes. For the two level example, the
178
+ correlation function is essentially determined by a
179
+ single equation for ρ13. If there is more than one
180
+ decay channel, then additional terms appear in (5),
181
+ for example, for the case shown in Fig.1, γ1 should
182
+ be replaced by (γ1 +γ2) in the two first equations in
183
+ (5).
184
+ Figure 1. Schematic illustration of the scheme that leads
185
+ to the creation of poles of order two in the decay of the
186
+ state |1⟩, which could be pumped in two different ways ei-
187
+ ther from the state |3⟩ or from a state outside the system.
188
+ This provides the realization of the extended Friedrich-
189
+ Lee model.
190
+ CREATION OF A DOUBLE POLE
191
+ We next demonstrate how by using external elec-
192
+ tromagnetic fields we can convert simple poles of L
193
+ into poles of higher order. For this purpose, we con-
194
+ sider the application of an electromagnetic field that
195
+ is tuned close to the transition frequency ω12 [Fig. 1.
196
+ Λ0 = 0, Λ ̸= 0, Gl ̸= 0]. The Hamiltonian describing
197
+ this system can be written as
198
+ H = ħω_{13} |1⟩⟨1| + ħ(ω_{13} − ω_{12}) |2⟩⟨2| + H_{ext} + V_{12} + V_{13},   (11)
200
+ where Vαβ describes the decay on the transition
201
+ |α⟩ → |β⟩ and where
202
+ H_{ext} = −ħ(G_l e^{−iω_l t} |1⟩⟨2| + h.c.),   (12)
+ G_l = (d⃗_{12} · E⃗_l / ħ).   (13)
+ The parameter 2Gl is the Rabi frequency of the field and is a measure of the strength of the laser field applied on the transition |1⟩ ↔ |2⟩. The Hamiltonian (11) is time-dependent; however, one can make a canonical transformation to reduce it to a time-independent Hamiltonian. In the special case V12 → 0 the model (11) is equivalent to the extended Friedrich-Lee model. We have thus produced a realization of a field-theoretic model in the context of atoms interacting with laser fields. In our case lasers are used to control the decay process. Note that we have two control parameters, ωl and Gl, to manipulate the nature of the poles of L. The situation shown in Fig. 1 is realizable in many atoms, in molecules, in dopants in solid matrices, etc. For example, in 87Rb vapor, the states |1⟩, |2⟩ and |3⟩ could be the states 5P_{3/2}, 5S_{1/2}(F = 2) and 5S_{1/2}(F = 1), respectively.
+ We eliminate the optical frequencies by making canonical transformations ρ13 → ρ13 e^{−iω13t}, ρ12 → ρ12 e^{−iωlt}, etc. After the canonical transformations, and after eliminating the vacuum degrees of freedom using master equation techniques, the density matrix equations read [5]
+ ρ̇11 = −2(γ1 + γ2 + Λ)ρ11 + 2Λρ33 + iGlρ21 − iGl*ρ12,
+ ρ̇22 = 2γ2ρ11 − iGlρ21 + iGl*ρ12,
+ ρ̇21 = −(Γ21 − i∆l)ρ21 − iGl*ρ22 + iGl*ρ11,
+ ρ̇31 = −Γ31ρ31 − iGl*ρ32,
+ ρ̇32 = −(Γ32 + i∆l)ρ32 − iGlρ31.    (14)
+ Here we have also included a pumping parameter Λ to pump the population from the level |3⟩ to |1⟩. The Γαβ give the decay of the off-diagonal elements ραβ of the density matrix and are given by
+ Γ31 = γ1 + γ2 + 2Λ,  Γ32 = Λ,  Γ21 = γ1 + γ2 + Λ,  ∆l = ω12 − ωl.    (15)
+ From (14) and the quantum regression theorem we derive coupled equations for the two-time atomic correlation functions:
+ [d/dτ + M] (⟨A13(t + τ)A31(t)⟩, ⟨A23(t + τ)A31(t)⟩)ᵀ = 0,  M = ( Γ31  iGl* ; iGl  Γ32 + i∆l ).    (16)
+ These are to be solved subject to the initial conditions
+ ⟨A13A31⟩ = ρ11,  ⟨A23A31⟩ = ρ12,    (17)
+ which in turn are determined from the steady-state solution of (14).
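+ A minimal sketch (hypothetical parameters, not the paper's code) that solves (16) at the double-pole point treated below: with ∆l = 0 and 2Gl = Γ31 − Γ32 the matrix M is non-diagonalizable (a Jordan block), so the correlations acquire a secular term linear in τ on top of the exponential:

```python
import numpy as np
from scipy.linalg import expm

# Solve Eq. (16) at the double-pole point. Hypothetical parameters Gamma31 = 1,
# Gamma32 = 0.2 and the illustrative initial condition (rho11, rho12) = (1, 0),
# cf. Eq. (17). The solution is (1 + b*tau)*exp(z0*tau), not a pure exponential.
g31, g32 = 1.0, 0.2
gl = 0.5 * (g31 - g32)                     # double-pole condition, Eq. (19), real Gl
M = np.array([[g31, 1j * gl],
              [1j * gl, g32]])
v0 = np.array([1.0, 0.0], dtype=complex)

z0 = -0.5 * (g31 + g32)                    # degenerate pole, Eq. (20) below
for tau in [0.5, 1.0, 2.0]:
    c = expm(-M * tau) @ v0                # (<A13(t+tau)A31(t)>, <A23(t+tau)A31(t)>)
    a13 = (1.0 - 0.5 * (g31 - g32) * tau) * np.exp(z0 * tau)   # closed form for this v0
    print(tau, np.round(c[0], 6), np.round(a13 + 0j, 6))
```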
+ Clearly the poles of L that determine the spectral characteristics are given by the zeroes of
+ P(z) = (z + Γ31)(z + Γ32 + i∆l) + |Gl|².    (18)
+ The zeroes of (18) for ∆l = 0 are shown in Fig. 2. The conditions under which P(z) has a double zero are
+ ∆l = 0,  (Γ32 − Γ31)² = 4|Gl|².    (19)
+ The double zero z0 occurs at the bifurcation point in Fig. 2,
+ z0 = −(1/2)(Γ31 + Γ32).    (20)
+ We therefore conclude [6] that a simple pole can be converted into a double pole in a laboratory experiment by applying an electromagnetic field resonant with the transition |1⟩ ↔ |2⟩ and with Rabi frequency equal to |Γ31 − Γ32|.
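+ The root motion in Fig. 2 can be reproduced with a few lines (Γ31 = 1, Γ32 = 0.2, ∆l = 0, as in the figure):

```python
import numpy as np

# Sketch reproducing the motion of the zeroes of Eq. (18) shown in Fig. 2.
# For Delta_l = 0, P(z) = z^2 + (G31 + G32) z + G31*G32 + |Gl|^2.
G31, G32 = 1.0, 0.2
for Gl in np.linspace(0.0, 1.0, 6):
    z = np.roots([1.0, G31 + G32, G31 * G32 + Gl**2])
    print(f"|Gl| = {Gl:.1f}  ->  z = {np.round(z, 3)}")
# Below the bifurcation (|Gl| < 0.4) both roots are real decay rates; at
# |Gl| = 0.4 they merge at z0 = -0.6, Eq. (20); beyond it they split into a complex pair.
```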
+ Figure 2. Motion of the zeroes of (18) for Γ31 = 1, Γ32 = 0.2. Note the presence of the bifurcation point. This is precisely the point where we create a pole of order two. The solid curve represents Im(z) + 0.6 whereas the dashed curve gives Re(z).
+ LINE SHAPES AND DOUBLE POLES
+ The line shape can be calculated from the solution of (16) and (6):
+ S(ω) ≡ ρ11 Re[(γ2 + Γ32 − iδ)/((Γ31 − iδ)(Γ32 − iδ) + |Gl|²)],    (21)
+ which under the double-pole condition 2|Gl| = |Γ31 − Γ32| reduces to
+ S(ω) = ρ11 Re[(γ2 + Γ32 − iδ)/(−iδ + γ0)²] = ρ11 [δ²(γ1 + 2Λ) + γ0²(γ2 + Λ)]/(δ² + γ0²)²,  γ0 = (1/2)(γ1 + γ2 + 3Λ).    (22)
+ This is the modified line shape formula. Note the double-hump structure of the line shape. Note further the sensitivity of S(ω) to the pumping parameter Λ. In the limit γ2 → 0 and Λ ≪ γ1, (22) reduces to
+ S(ω) ≡ ρ11 γ1 (δ² + (γ1/4)Λ)/(δ² + γ1²/4)².    (23)
+ It is also interesting to note that the scale parameter is now (γ1/2) rather than γ1. Thus the total line shape is a sum of (a) the square of a Lorentzian and (b) the derivative of a Lorentzian (ζ/(ζ + γ0)² ≡ −ζ ∂/∂ζ (1/(ζ + γ0))).
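+ The following sketch makes the Λ-sensitivity explicit by evaluating (22) for a few pump rates (γ1 = 1, γ2 = 0, ρ11 = 1 are illustrative choices):

```python
import numpy as np

# Sketch of the double-pole line shape, Eq. (22), for several pump rates Lambda.
g1, g2, rho11 = 1.0, 0.0, 1.0
delta = np.linspace(-2, 2, 4001) * g1

def S(d, Lam):
    g0 = 0.5 * (g1 + g2 + 3 * Lam)
    return rho11 * (d**2 * (g1 + 2 * Lam) + g0**2 * (g2 + Lam)) / (d**2 + g0**2) ** 2

for Lam in [0.0, 0.05, 0.2]:
    s = S(delta, Lam)
    pos = delta > 0
    print(Lam, S(np.zeros(1), Lam)[0], delta[pos][np.argmax(s[pos])])
# The dip at delta = 0 fills in as Lambda grows, while the humps shift position.
```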
+ It is possible to consider an alternate model of pumping, obtained by setting Λ = 0 in Fig. 1. Assuming that γ2 = 0, one can show that instead of (23) the spectral line shape is now given by
+ S(ω) ≡ γ1 ρ11 δ²/(δ² + γ1²/4)² = (−δ ∂/∂δ) [(γ1/2)ρ11/(δ² + γ1²/4)],    (24)
+ which is shown in Fig. 3. The figure also shows for comparison the Breit-Wigner formula (2). Note the double-hump structure of the line shape. The maxima now occur at δ = ±γ1/2.
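+ A short sketch of (24) against the Breit-Wigner form, as in Fig. 3 (γ1 = ρ11 = 1 assumed):

```python
import numpy as np

# Sketch comparing the double-pole line shape of Eq. (24) with the Breit-Wigner
# form of Eq. (2) (gamma_2 = 0), as in Fig. 3; gamma_1 = rho11 = 1 assumed.
g1, rho11 = 1.0, 1.0
d = np.linspace(-3, 3, 6001) * g1

S_dp = g1 * rho11 * d**2 / (d**2 + g1**2 / 4) ** 2   # Eq. (24)
S_bw = (g1 / np.pi) / (g1**2 + d**2)                  # Eq. (2) with gamma_2 = 0

pos = d > 0
print(d[pos][np.argmax(S_dp[pos])])   # hump at delta = +gamma_1/2 (and -gamma_1/2 by symmetry)
print(S_dp[np.abs(d).argmin()])       # the emission vanishes exactly at line center
```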
+ From Eq. (14) we can also compute the time dependence of ρ11(t) under the condition of a double pole. The result is
+ ρ11(t) = (1 − γ1t/2)² e^{−γ1t}.    (25)
+ It is again interesting to note that the time scale is governed by γ1/2 rather than γ1.
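+ Eq. (25) can be verified by direct numerical integration of (14) with γ2 = Λ = 0, ∆l = 0 and Gl = γ1/2; a minimal sketch (γ1 = 1 assumed, real Gl):

```python
import numpy as np
from scipy.integrate import solve_ivp

# Integrate the closed (rho11, rho22, rho21) block of Eq. (14) at the
# double-pole point (gamma_2 = Lambda = 0, Delta_l = 0, Gl = gamma_1/2)
# and compare rho11(t) with Eq. (25). State y = (rho11, rho22, Re rho21, Im rho21).
g1 = 1.0
Gl = g1 / 2

def rhs(t, y):
    r11, r22 = y[0], y[1]
    r21 = y[2] + 1j * y[3]
    d11 = -2 * g1 * r11 + 1j * Gl * r21 - 1j * Gl * np.conj(r21)
    d22 = -1j * Gl * r21 + 1j * Gl * np.conj(r21)
    d21 = -g1 * r21 - 1j * Gl * r22 + 1j * Gl * r11   # Gamma21 = gamma_1 here
    return [d11.real, d22.real, d21.real, d21.imag]

sol = solve_ivp(rhs, [0, 4], [1, 0, 0, 0], dense_output=True, rtol=1e-9, atol=1e-12)
for t in [0.5, 1.0, 2.0]:
    print(t, sol.sol(t)[0], (1 - g1 * t / 2) ** 2 * np.exp(-g1 * t))   # Eq. (25)
```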
+ Figure 3. The modified line shape (24) (dashed) as a function of δ/γ1 and its comparison with the Breit-Wigner line shape (solid).
+ The basic idea presented above is easily extended to more complex situations. Consider, for example, two-photon decay in the system shown in Fig. 4, which is easily realizable in atoms and molecules. The full Hamiltonian for this system can be written as
+ H = ħω13 |1⟩⟨1| + ħω23 |2⟩⟨2| + ħω43 |4⟩⟨4| − ħ(Gl e^{−iωlt} |4⟩⟨2| + h.c.) + Σks ħωks a†ks aks + V12 + V23 + V42,    (26)
+ where the meaning of the different terms is obvious. Again a canonical transformation will change the above H into a time-independent H. For V42 → 0, the above Hamiltonian becomes identical to the one for the quantum field theoretic extended cascade field model. We thus have a simple atomic realization of the field-theoretic model. As shown recently [7], this system exhibits very interesting two-photon absorption characteristics. Clearly, the electromagnetic coupling between the levels |2⟩ and |4⟩ can produce a double pole in the decay of the system. It is interesting that a system equivalent to this has been studied by Bhamathi and Sudarshan [2].
+ Figure 4. A scheme involving laser coupling of the intermediate state |2⟩, which will create a pole of order two in the two-photon decay. This provides an analog of the extended cascade model.
+ DOUBLE POLES AND INTERFERENCE EFFECTS
+ The existence of double poles and the possibility of a line shape which is the derivative of a Lorentzian suggest that quantum interferences must be crucial. This is indeed the case, as can be seen from the following considerations. The electromagnetic coupling of |1⟩ and |2⟩ produces dressed states |ψ±⟩ = (1/√2)(±|1⟩ + |2⟩) with eigenvalues ±Gl. Since Gl ∼ γ, the two states are within the radiative line width. We pump the population into the state |1⟩, which is equivalent to pumping into both |ψ±⟩ as
+ |1⟩ = (|ψ+⟩ + |ψ−⟩)/√2. Both states |ψ±⟩ can decay to |3⟩ as |ψ±⟩ involve admixtures of |1⟩ and |2⟩. These two decays will not be independent [8,9] as d⃗+3 · d⃗−3* ̸= 0 and as Gl ∼ γ.
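+ A one-line diagonalization makes the dressed-state picture concrete (resonant coupling in the rotating frame, Gl = 1 assumed; the assignment of ± depends on phase conventions):

```python
import numpy as np

# Dressed states of the rotating-frame coupling -Gl(|1><2| + |2><1|) on resonance
# (hbar = 1, Gl = 1 assumed). The eigenvectors are (|1> +/- |2>)/sqrt(2) and the
# dressed levels are split by 2*Gl, within the radiative width when Gl ~ gamma.
Gl = 1.0
Hc = -Gl * np.array([[0.0, 1.0], [1.0, 0.0]])   # basis (|1>, |2>)
vals, vecs = np.linalg.eigh(Hc)
print(vals)     # [-Gl, +Gl]: splitting 2*Gl
print(vecs.T)   # rows proportional to (|1> + |2>)/sqrt(2) and (|1> - |2>)/sqrt(2)
```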
+ EXPONENTIAL DECAY RECOVERED
+ We also examine the initial conditions for our system which would result in exponential decay. From Eq. (16) it is seen that
+ d/dτ ⟨(A13(t + τ) + iA23(t + τ))A31(t)⟩ + (γ1/2) ⟨(A13(t + τ) + iA23(t + τ))A31(t)⟩ = 0    (27)
+ if Gl = γ1/2, γ2 → 0. Thus the correlation function defined in terms of the vector ψ̃ = (1/√2)(|1⟩ + i|2⟩) obeys a simple exponential decay law with a time scale governed by γ1/2 rather than γ1:
+ ⟨Aψ̃3(t + τ)A3ψ̃(t)⟩ = e^{−γ1τ/2} ⟨Aψ̃ψ̃(t)⟩.    (28)
+ Thus a pumping of the system into the state ψ̃ rather than |1⟩ will result in exponential decay [10].
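+ Eq. (27) is the statement that (1, i) is a left eigenvector, with eigenvalue γ1/2, of the matrix in (16) for Γ31 = γ1, Γ32 = 0, ∆l = 0 and Gl = γ1/2; a quick check:

```python
import numpy as np

# Left-eigenvector check for Eq. (27): with gamma_2 -> 0 and Gl = gamma_1/2,
# the row vector (1, i) satisfies (1, i) M = (gamma_1/2)(1, i), so the
# combination <(A13 + i A23)(t+tau) A31(t)> decays as exp(-gamma_1 tau / 2).
g1 = 1.0
Gl = g1 / 2
M = np.array([[g1, 1j * Gl],      # Gamma31 = gamma_1, Gamma32 = 0 (Lambda = 0)
              [1j * Gl, 0.0]])
u = np.array([1.0, 1j])
print(u @ M, (g1 / 2) * u)        # both equal (gamma_1/2) * (1, i)
```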
+ In conclusion, we have shown how higher-order poles in the decay of states can be produced by using resonant electromagnetic fields. We demonstrated this by creating a pole of order two. Clearly, the technique is quite versatile, and by using combinations of electromagnetic fields we can create poles of higher order.
+ I thank George Sudarshan for discussions on higher-order poles of the S-matrix and R.P. Singh for help in the preparation of this paper.
+ [1] M.L. Goldberger and K.M. Watson, Phys. Rev. 136, B1472 (1964); J.S. Bell and C.J. Goebel, Phys. Rev. 138, B1198 (1965).
+ [2] G. Bhamathi and E.C.G. Sudarshan, Int. J. Mod. Phys. B 10, 1531 (1996); see also E.C.G. Sudarshan, Phys. Rev. A 50, 2006 (1994); E.C.G. Sudarshan, C.B. Chiu and G. Bhamathi, Phys. Rev. D 46, 3508 (1992).
+ [3] A. Bohm, S. Maxson and M. Loewe, Physica A, in press; A. Mondragon and E. Hernandez, J. Phys. A 26, 5595 (1993); C. Puntmann, paper presented at the International Colloquium on Group Theory, Goslar, Germany, 1996.
+ [4] G.S. Agarwal, Quantum Optics (Springer-Verlag, Berlin, 1974).
+ [5] G.S. Agarwal, Phys. Rev. A 54, R3734 (1996).
+ [6] An almost trivial case occurs when Γ32 = 0, γ2 = 0 and there is no pumping (Λ = 0). The atom can start in the state |1⟩. Then one can work with the nonhermitian Hamiltonian ( −iγ1  G ; G  0 ), which has a degenerate eigenvalue if 2G = γ1. This case has been previously considered in the literature (H. Steudel, Ann. Physik 22, 113 (1969)).
+ [7] G.S. Agarwal and W. Harshawardhan, Phys. Rev. Lett. 77, 1039 (1996).
+ [8] G.S. Agarwal, Quantum Optics (Springer-Verlag, Berlin, 1974), pp. 94-96.
+ [9] A. Imamoglu, Phys. Rev. A 40, 2835 (1989); S.Y. Zhu and M.O. Scully, Phys. Rev. Lett. 76, 388 (1996); D.A. Cardimona, M.G. Raymer and C.R. Stroud Jr., J. Phys. B 15, 55 (1982).
+ [10] Pumping the system into the state ψ̃ is possible using an excitation pulse with phase switching at an appropriate instant (cf. Y.S. Bai, A.G. Yodh and T.W. Mossberg, Phys. Rev. Lett. 55, 1277 (1985)).
239
+ page_content=' A 54, Rapid Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
240
+ page_content=' 3734 (1996).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
241
+ page_content=' [6] An almost trivial case occurs when Γ23 = 0, γ2 = 0 and no pumping (Λ = 0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
242
+ page_content=' The atom can start in state |1⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
243
+ page_content=' Then one can work with a nonhermitian Hamiltonian � −iγ1 G G 0 � which has identical real eigenvalues if 2G = γ1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
244
+ page_content=' This case has been pre- viously considered in literature (H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
245
+ page_content=' Steudel, Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
246
+ page_content=' Physik 22, 113 (1969).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
247
+ page_content=' [7] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
248
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
249
+ page_content=' Agarwal and W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
250
+ page_content=' Harshawardhan, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
251
+ page_content=' Rev, Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
252
+ page_content=' 77, 1039 (1996).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
253
+ page_content=' [8] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
254
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
255
+ page_content=' Agarwal, Quantum Optics (Springer-Verlag, Berlin, 1974) pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
256
+ page_content=' 94-96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
257
+ page_content=' [9] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
258
+ page_content=' Imamoglu, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
259
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
260
+ page_content=' A 40, 2835 (1989);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
261
+ page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
262
+ page_content='Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
263
+ page_content=' Zhu and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
264
+ page_content='O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
265
+ page_content=' Scully, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
266
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
267
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
268
+ page_content=' 76, 388 (1996);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
269
+ page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
270
+ page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
271
+ page_content=' Cardimona, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
272
+ page_content='G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
273
+ page_content=' Raymer and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
274
+ page_content='R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
275
+ page_content=' Stroud Jr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
276
+ page_content=', J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
277
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
278
+ page_content=' B 15, 55 (1982).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
279
+ page_content=' [10] Pumping the system in the state ≫ is possible using an excitation pulse with phase switching at appro- priate instant (cf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
280
+ page_content=' Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
281
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
282
+ page_content=' Bai, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
283
+ page_content='G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
284
+ page_content=' Yodh and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
285
+ page_content='W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
286
+ page_content=' Mossberg, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
287
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
288
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
289
+ page_content=' 55, 1277 (1984)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/aNE4T4oBgHgl3EQfnw2m/content/2301.05179v1.pdf'}
cNFAT4oBgHgl3EQfYB2G/content/tmp_files/2301.08537v1.pdf.txt ADDED
@@ -0,0 +1,1056 @@
1
+ Towards Multi-robot Exploration:
2
+ A Decentralized Strategy for UAV Forest Exploration
3
+ Luca Bartolomei, Lucas Teixeira and Margarita Chli
4
+ Vision For Robotics Lab, ETH Zürich, Switzerland
5
+ Abstract— Efficient exploration strategies are vital in tasks
6
+ such as search-and-rescue missions and disaster surveying.
7
+ Unmanned Aerial Vehicles (UAVs) have become particularly
8
+ popular in such applications, promising to cover large areas at
9
+ high speeds. Moreover, with the increasing maturity of onboard
10
+ UAV perception, research focus has been shifting toward higher-
11
+ level reasoning for single- and multi-robot missions. However,
12
+ autonomous navigation and exploration of previously unknown
13
+ large spaces still constitutes an open challenge, especially when
14
+ the environment is cluttered and exhibits large and frequent
15
+ occlusions due to high obstacle density, as is the case of forests.
16
+ Moreover, the problem of long-distance wireless communication
17
+ in such scenes can become a limiting factor, especially when
18
+ automating the navigation of a UAV swarm. In this spirit,
19
+ this work proposes an exploration strategy that enables UAVs,
20
+ both individually and in small swarms, to quickly explore
21
+ complex scenes in a decentralized fashion. By providing the
22
+ decision-making capabilities to each UAV to switch between
23
+ different execution modes, the proposed strategy strikes a
24
+ great balance between cautious exploration of yet completely
25
+ unknown regions and more aggressive exploration of smaller
26
+ areas of unknown space. This results in full coverage of forest
27
+ areas of variable density, consistently faster than the state of the
28
+ art. Demonstrating successful deployment with a single UAV as
29
+ well as a swarm of up to three UAVs, this work sets out the basic
30
+ principles for multi-robot exploration of cluttered scenes, with
31
+ up to 65% speed up in the single UAV case and 40% increase in
32
+ explored area for the same mission time in multi-UAV setups.
33
+ I. INTRODUCTION
34
+ The growing interest in Unmanned Aerial Vehicles (UAVs)
35
+ has led to their extensive deployment in tasks such as
36
+ inspection and search-and-rescue missions. In these appli-
37
+ cations, the capacity of the robot to quickly explore and
38
+ map unknown environments autonomously is fundamental.
39
+ The literature on this topic is extensive, and many different
40
+ approaches have been proposed throughout the years [1]–
41
+ [5]. However, one of the biggest challenges in the explo-
42
+ ration of unknown environments is the capacity to achieve
43
+ a good trade-off between the competing goals of shorter
44
+ exploration times of an area of interest (i.e. pushing for
45
+ high-speed navigation) and safety, which requires caps on
46
+ the velocity of each robot. In fact, navigating in the vicinity
47
+ of the boundaries between known and unknown space is
48
+ challenging, as the robot can get stuck in dead ends, or
49
+ needs to perform complex dodging maneuvers to avoid
50
+ collisions. Consequently, to maintain the safety of both the
51
+ platform and its surroundings, most path planners generate
52
+ conservative start-and-stop motions, not fully exploiting the
53
+ This work was supported by NCCR Robotics, the Amazon Research
54
+ Awards, and the HILTI group.
55
+ Fig. 1: 3D-view of the proposed system guiding safe and successful
56
+ exploration of a UAV in a digital model of a real-world forest
57
+ [6]. The planner is able to avoid collisions between the UAV and
58
+ the obstacles, clearing frontiers on-the-go by balancing cautious
59
+ navigation with aggressive exploitation of known, free space, in a
60
+ bid to maximize the efficiency of the exploration.
61
+ capacity of a UAV to fly at high speeds. This effect is
62
+ exacerbated when the environment to explore is particularly
63
+ cluttered, as is the case in forests, leading to inefficient and
64
+ incomplete coverage. By design, these methods generally
65
+ drive the exploration process by biasing exploration towards
66
+ large areas of unexplored space. While this strategy could be
67
+ advantageous in open and wide spaces, it can be detrimental
68
+ when exploring cluttered scenes. In fact, the main pitfall of
69
+ such strategies is that, while the exploration process attempts
70
+ to cover as much unknown space as possible, when this
71
+ is deployed in environments with many obstacles, thinner
72
+ trails of unknown space are left unexplored (e.g. due to
73
+ occlusions), imposing the need for a second sweep of the
74
+ environment over mostly explored areas.
75
+ Aiming to mitigate these issues, pushing for faster cov-
76
+ erage of the areas of interest, multi-robot extensions for
77
+ exploration have also been proposed [7]–[10]. However,
78
+ these focus on the problem of coordination at the system-
79
+ level, and while they can perform better from a global
80
+ planning point of view, they suffer from the same limitations
81
+ as the single-UAV case in obstacle-dense environments.
82
+ Motivated by these challenges, in this work we propose an
83
+ exploration strategy for autonomous UAV robots aiming to
84
+ explore forests of increasing tree density, as they pose some
85
+ of the most difficult challenges for exploration planning. Our
86
+ objective is to exploit the platform’s dynamics to the fullest
87
+ despite the high density of obstacles, in order to achieve the
88
+ complete coverage of the environment efficiently. To this end,
89
+ the proposed strategy enables switching between two differ-
90
92
+ ent behaviors for each robot; namely, cautious exploration
93
+ of unknown space and more aggressive maneuvers when
94
+ navigating in already explored areas to clear smaller portions
95
+ of unknown space caused by occlusions. We evaluate the
96
+ proposed approach in a series of challenging experiments in
97
+ simulation, both in randomly generated forests and in a 3D
98
+ reconstruction of a real forest (Fig. 1). Benchmarking against
99
+ the state of the art reveals superior efficiency for the proposed
100
+ approach achieving higher overall UAV speeds and lower
101
+ exploration times. Finally, aiming to set out the scaffolding
102
+ toward decentralized multi-robot exploration planning, we
103
+ show how the proposed strategy can accommodate more than
104
+ one UAV in the exploration mission, and we demonstrate that
105
+ our method performs comparably to or better than a map-
106
+ splitting centralized approach.
107
+ In summary, the contributions of this work are as follows:
108
+ • the design of an exploration strategy, able to strike
109
+ an effective balance between cautious exploration and
110
+ aggressive exploitation of the explored map,
111
+ • the extension of the single-robot design to a multi-robot
112
+ decentralized approach, and
113
+ • extensive evaluations in simulation, demonstrating bet-
114
+ ter performance than the state of the art.
115
+ II. RELATED WORKS
116
+ Autonomous exploration of unknown environments with
117
+ UAVs has been an active field of research over the past
118
+ few decades. The most popular approach to exploring an
119
+ area of interest is to use frontiers, defined as the boundary
120
+ between known and unknown space [11]. These can be
121
+ utilized to identify potentially informative spatial regions
122
+ in order to drive the exploration process efficiently until
123
+ no new frontiers are found and the exploration process can
124
+ be considered complete. There are different criteria used to
125
+ decide which frontier to explore next, such as their proximity
126
+ to the current field of view, following a greedy selection
127
+ strategy, or having global planning dictate the selection [12].
128
+ However, while frontier-based approaches have been proven
129
+ to yield satisfactory performances, especially in terms of
130
+ coverage [2], [3], they generally lead to inefficient motions
131
+ and sub-optimal action selection. This is mostly caused by
132
+ the sensing modalities used to generate the map of the
133
+ environment to explore, as the most common sensors, such as
134
+ RGB-D and stereo cameras, have a limited detection range.
135
+ Consequently, UAVs need to fly cautiously to ensure safety.
136
+ Cieslewski et al. [4] tackle this limitation, by proposing
137
+ an exploration strategy that generates velocity commands
138
+ based on newly detected frontiers, in a bid to maximize
139
+ the UAV’s speed. This method is shown to outperform
140
+ classical methods [11], but focuses only on local frontiers.
141
+ Instead, FUEL [5] proposes a hierarchical planner which
142
+ generates efficient global paths, while encouraging safe and
143
+ agile local maneuvers for high-speed exploration. FUEL’s
144
+ strategy performs better than [4] and [11] in scenes with
145
+ low obstacle densities. However, it is more computationally
146
+ demanding, as it needs to maintain a list of active frontiers,
147
+ as well as to compute accurate distances between them. This
148
+ additional bookkeeping becomes prohibitive and impractical
149
+ in more cluttered and complex environments such as forests.
150
+ In fact, in this type of scenery, the number of frontiers
151
+ quickly increases due to occlusions caused by tree trunks,
152
+ branches, and shrubs.
153
+ Another line of research focuses instead on sampling-
154
+ based path planning to generate viewpoints to explore the
155
+ space [13], [14], by guiding the robot along possible trails
156
+ of sampled configurations. The best path is generally found
157
+ using a greedy approach [14], leading to complete explo-
158
+ ration or accurate surface reconstruction [2] depending on
159
+ the information gain formulation. Nonetheless, these sampled
160
+ routes may generate trajectories that deviate from the shortest
161
+ paths, without taking into consideration the robot’s dynamics.
162
+ Consequently, this causes the UAV to navigate in zigzag
163
+ patterns, leading to inefficient, slow motions and conservative
164
+ maneuvers.
165
+ To tackle the limitations of frontier- and sampling-based
166
+ methods, also hybrid approaches have been proposed [1],
167
+ [15]. Such methods compute global paths towards the most
168
+ informative frontiers while generating local trajectories using
169
+ sampling-based planners. However, they do not exploit the
170
+ full dynamics of the platform and generate sub-optimal
171
+ routes.
172
+ To boost the efficiency in exploration, various multi-robot
173
+ cooperative frontier-based methods have also been proposed
174
+ in the literature, both in centralized [16] and decentralized
175
+ formats [17]. In this spirit, the work in [18] greedily assigns
176
+ view configurations, while [19] distributes the workload
177
+ between agents using a Voronoi-based partitioning of the
178
+ area to explore. Nevertheless, these solutions suffer from
179
+ the same limitations as in the single-robot case. Instead, the
180
+ approach in [20] is able to generate efficient trajectories for
181
+ 3D reconstruction, tackling the multi-robot coordination with
182
+ a centralized architecture. However, this method requires
183
+ a prior overhead flight over the area of interest, making
184
+ it unsuitable for the exploration of forests. The approach
185
+ proposed in [7] puts more focus on the problem of navigating
186
+ forests, but the emphasis is more on state estimation rather
187
+ than on path planning.
188
+ Motivated by these limitations, in this work, we propose
189
+ a strategy that allows a robot to explore complex forest-
190
+ like environments while flying at high speeds, thanks to
191
+ the freedom and flexibility that our planner provides to
192
+ each UAV to switch between different navigation modes
193
+ online. While slower, cautious exploration is performed
194
+ using a frontier-based approach, we efficiently clear trails of
195
+ unexplored space caused by occlusions by employing a more
196
+ aggressive local exploration strategy, boosting the efficiency
197
+ of the mission and pushing the overall time to cover a given
198
+ area of interest down. Moreover, we demonstrate that the
199
+ proposed pipeline can also be extended to the multi-robot
200
+ setting in a decentralized fashion.
201
+ III. METHODOLOGY
202
+ The overall problem considered in this work is to ex-
203
+ plore unknown cluttered environments, such as forests, in
204
+
205
+ (a) Time-instant 1
206
+ (b) Time-instant 2
207
+ (c) Time-instant 3
208
+ (d) Time-instant 4
209
+ Fig. 2: A schematic example demonstrating the problem with greedy frontier-based exploration, at progressive time-instants, generating
210
+ islands of unknown space surrounded by free regions. The field of view of the robot is depicted as a light-gray shaded area delimited by
211
+ black solid lines, while the obstacles and the unexplored space are in black and dark gray, respectively (a). The robot navigates towards
212
+ the most informative frontiers (b); however, due to the limited sensor range, the space occluded by the obstacles is not cleared (c).
213
+ Consequently, since the exploration process is biased towards larger, more informative frontiers, the naïve planner flies the UAV robot
214
+ ignoring the smaller portion of unexplored space (d).
215
+ the minimum time possible. We assume that the robot is
216
+ equipped with a front-looking depth camera with a limited
217
+ sensing range and that the robot’s odometry information is
218
+ available at a constant rate. However, forest-like scenes are
219
+ characterized by a high number of obstacles in a variety
220
+ of dimensions (e.g. trunks, leaves, and branches) that make
221
+ standard frontier-based exploration approaches inefficient.
222
+ In fact, during the exploration process, many islands of
223
+ unknown space are usually left behind, as illustrated in
224
+ Fig. 2, necessitating subsequent passes of exploration on a
225
+ nearly completely explored map. To tackle this limitation,
226
+ we propose an exploration pipeline that can change the ex-
227
+ ploratory behavior of the robot depending on the frontiers in
228
+ its vicinity. In particular, we propose to define two different
229
+ modes of operation for the robot, namely the Explorer and
230
+ the Collector modes. In the Explorer state, the robot is driven
231
+ by frontiers and it is tasked to explore large unknown areas.
232
+ Consequently, it predominantly operates on the most external
233
+ boundaries between known and unknown areas. Conversely,
234
+ the robot in the Collector mode clears small islands of
235
+ unknown space generated by occlusions, that are left behind
236
+ during the exploratory phase. The objective of a Collector is
237
+ to clear these portions of space on the go, avoiding the need
238
+ for subsequent revisits of the map, at the expense of short
239
+ local detours. However, notice that these can be performed at
240
+ high speed, since, when in Collector mode, the robot operates
241
+ in mostly explored areas. By allowing a robot to switch
242
+ between these two different modes and by finding the right
243
+ trade-off between map exploration and exploitation, we can
244
+ quickly reach full coverage of large cluttered environments.
245
+ In the following, we first give an overview of the proposed
246
+ system and our exploration strategies, and then we illustrate
247
+ how it can be extended to multiple robots.
248
+ A. System Overview
249
+ As shown in Fig. 3, the pipeline is composed of three main
250
+ components: a mapping system, a mode selector, and a path
251
+ planner.
252
+ Given input depth and odometry information, a voxel
253
+ grid map M of the environment is generated. At every
254
+ update, frontiers are extracted from M and clustered. For
255
+ each cluster, we adopt the sampling strategy from [5] to
256
+ generate viewpoints covering the frontiers, and we use them
257
+ as possible target poses during the exploration process.
258
+ Moreover, each cluster undergoes a binary classification
259
+ step, where unconnected islands of frontiers, or trails, are
260
+ identified. This is necessary to identify those regions that are
261
+ likely to require an additional revisiting phase towards the
262
+ end of the mission if a traditional frontier-based exploration
263
+ method is utilized. Here, a cluster is considered a trail if its
264
+ convex hull is surrounded by free space, or when it has only
+ one other neighboring cluster. This implies that most clusters
266
+ at the corners of the area to be explored are classified as
267
+ trails. We motivate this design choice by arguing that corners
268
+ are generally problematic for exploration due to their low
269
+ informative value. In fact, they are rarely covered in a first
270
+ sweep of the map, implying the need for a revisiting step.
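+ To make the trail-labeling rule concrete, a minimal Python sketch is given below; it is illustrative only, and the cluster/map interfaces (convex_hull_cells, neighbors_outside, num_neighbor_clusters) are assumed names, not the paper's implementation.
+ def is_trail(cluster, grid_map):
+     # A cluster is a trail if its convex hull is surrounded by free
+     # space, or if it has at most one neighboring cluster.
+     hull = cluster.convex_hull_cells()
+     surrounded = all(grid_map.is_free(c) for c in grid_map.neighbors_outside(hull))
+     return surrounded or cluster.num_neighbor_clusters() <= 1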
271
+ The labeled clusters are then utilized by the Mode Selector to
272
+ choose the best exploration strategy for the robot, deciding
273
+ whether it has to persevere in its current mode, or transit
274
+ to Explorer or Collector. The mode assignment is regulated
275
+ according to the frontiers in the vicinity of the UAV. Given
276
+ that our objective is to clear trails locally to avoid large
277
+ detours on the map, we assign the role of Collector if a
278
+ minimum number of trails is close to the robot. Instead, we
279
+ adopt a more exploratory strategy once all smaller islands of
280
+ unknown space are cleared, or when the trails are far away
281
+ from the drone. Once a strategy is selected, the viewpoint of
282
+ the most promising cluster is selected as the new target pose.
283
+ This is fed to a path planner [21] that generates the trajectory
284
+ flying the UAV toward its destination. We now describe the
285
+ different exploration modes in more detail.
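+ As a rough sketch of this switching rule (the trail-count threshold and search radius below are illustrative assumptions, not values from the paper):
+ import numpy as np
+ def select_mode(robot_pos, clusters, min_trails=2, trail_radius=10.0):
+     # Become a Collector when enough trails lie near the robot;
+     # otherwise keep exploring the outer frontiers as an Explorer.
+     nearby = [c for c in clusters if c.is_trail
+               and np.linalg.norm(c.viewpoint_pos - robot_pos) < trail_radius]
+     return "COLLECTOR" if len(nearby) >= min_trails else "EXPLORER"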
286
+ B. Exploration Strategies
287
+ 1) Explorer: Driven by frontiers, the objective of an
288
+ Explorer is to cover large areas of previously unknown space.
289
+ Similarly to [4], we process the incoming clusters of frontiers
290
+ C from the most recent map update and extract the one with
291
+ the lowest cost JE. Notice that these clusters are mostly
292
+ aligned with the direction of the UAV’s motion, implying
293
+ that, if one of these is selected as the target, the robot
294
+ avoids abrupt changes in the flight direction or aggressive
295
+
296
+ [Figure 3 block diagram: Depth and Odometry feed Mapping (3D Map, Frontiers Extraction, Frontiers Classification); the Mode Selector picks the strategy; Path Planning (Target Pose Selector, Trajectory Generation) sends commands to the Low-level Controller]
308
+ Fig. 3: Schematic overview of the proposed exploration pipeline for a single agent. The inputs to the system are the robot’s odometry and
309
+ depth information. These are used to generate a 3D grid-based map of the environment, from which frontiers are extracted and clustered.
310
+ The trails of frontiers are identified and used to select the adequate exploration mode for the agent. Then, the next target pose is chosen
311
+ and a trajectory towards the goal pose is generated using [21].
312
+ maneuvers.
313
+ The cost associated to the viewpoint ξc := {xc, γc}
314
+ covering cluster c ∈ C is defined as
315
+ JE(ξc) := ωDJD(ξc) + ωV JV (ξc) + ωLJL(c),
316
+ (1)
317
+ where xc ∈ R3 is the position of the viewpoint and γc ∈
318
+ R its orientation. The cost JD is the length of the path in
319
+ M between the current robot’s position and xc, and it is
320
+ calculated using the A* algorithm. Instead, JV is associated
321
+ with the change in direction of travel, while JL to the label
322
+ of cluster c. The terms ωD, ωV and ωL are constant weights.
323
+ The cost JV (ξc) is calculated as
324
+ JV (ξc) := acos( vR^T (xc − xR) / ||xc − xR||2 ),
+ (2)
330
+ where vR ∈ R3 and xR ∈ R3 are the robot’s current velocity
331
+ and position, respectively. This cost is directly associated
332
+ with the angle between the velocity and the direction vector
333
+ towards the candidate position xc covering cluster c. How-
334
+ ever, it may happen that the cluster is labeled as a trail, e.g. in
335
+ the case of occlusions caused by thin obstacles, such as tree
336
+ trunks. Since an Explorer should focus on actual frontiers,
337
+ we assign a penalty to these clusters:
338
+ JL(c) := { 0 if c is a frontier; ptrail if c is a trail },
+ (3)
346
+ where ptrail is the constant penalty associated with trails.
347
+ We then select as target pose the next best viewpoint
348
+ ξc∗ := {xc∗, γc∗} covering the cluster c∗ with the lowest
349
+ cost:
350
+ ξc∗ := arg min_{ξc, ∀c∈C} JE(ξc).
+ (4)
353
+ In case the UAV is trapped in a dead-end, or if no new
354
+ clusters are available in front of the robot, we ignore the cost
355
+ associated with the robot’s velocity and we employ a greedy
356
+ approach to select the new target pose. We find the best
357
+ cluster in the vicinity of the robot at a maximum distance
358
+ dmax using the same cost function as in Eq. 1, with JV set
359
+ to zero:
360
+ ξc∗ := arg min_{ξc, ∀c∈C} ωD JD(ξc) + ωL JL(c),
+ s.t. ||xc − xR||2 ≤ dmax.
+ (5)
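+ A compact sketch of the Explorer selection logic of Eqs. 1-5 follows; it is an illustration under assumed interfaces (path_len stands in for the A* distance JD, and the viewpoint fields are hypothetical):
+ import numpy as np
+ def explorer_cost(vp, x_R, v_R, path_len, wD, wV, wL, p_trail):
+     # JE (Eq. 1): A* path length + direction change (Eq. 2) + trail penalty (Eq. 3).
+     d = vp.pos - x_R
+     d = d / max(np.linalg.norm(d), 1e-9)
+     v = v_R / max(np.linalg.norm(v_R), 1e-9)
+     jV = float(np.arccos(np.clip(v @ d, -1.0, 1.0)))
+     jL = p_trail if vp.is_trail else 0.0
+     return wD * path_len + wV * jV + wL * jL
+ # Eq. 4: best = min(viewpoints, key=lambda vp: explorer_cost(vp, x_R, v_R, astar_len(vp), wD, wV, wL, p_trail))
+ # Eq. 5 (dead-end fallback): drop the jV term and restrict to viewpoints with ||vp.pos - x_R|| <= dmax.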
364
+ 2) Collector: The objective of a Collector is to clear as
365
+ many trails as possible, in order to avoid the need for a
366
+ revisiting step in poorly explored regions of the map at the
367
+ end of the mission. Since this task implies a detour from
368
+ the main direction of exploration, the UAV’s speed needs
369
+ to be maximized in order to go back to Explorer mode as
370
+ soon as possible. To reach this objective, we sort the set of
371
+ trails Ctrails ⊆ C by associating a cost JC to each cluster
372
+ c ∈ Ctrails:
373
+ JC(ξc) := ωP JP (ξc) + ωAJA(ξc),
374
+ (6)
375
+ where JP is associated with the time to reach xc and JA with
376
+ the time to cover the angular change between the robot's
+ current yaw and the viewpoint's orientation. Instead, ωP and
378
+ ωA are constant weights.
379
+ Given the path πc^R from xR to xc and the maximum
+ allowed velocity vmax, JP is computed as
+ JP (ξc) := length(πc^R) / vmax.
+ (7)
388
+ Similarly, given the robot's current heading γR, the view-
+ point's orientation γc and the maximum allowed yaw rate
+ ˙γmax, JA is computed as
+ JA(ξc) := ∠(γR, γc) / ˙γmax,
+ (8)
395
+ where ∠(γR, γc) indicates the angular difference between γR
396
+ and γc.
397
+ The robot then selects the target trail in a step-by-step
398
+ greedy procedure and behaves as a Collector until all close-
399
+ by trails are cleared. Since the trails are surrounded by free
400
+ known space, we double the maximum velocity compared to
401
+ when in Explorer mode. Consequently, the UAV is able to
402
+ maximize its velocity, leading to fast motions that allow it
403
+ to quickly cover all the viewpoints associated with the trails.
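+ A minimal sketch of the Collector cost of Eqs. 6-8, under the same illustrative assumptions as above; recall that in Collector mode the velocity limit is doubled, which directly lowers JP:
+ import math
+ def collector_cost(vp, robot_yaw, path_len, v_max, yaw_rate_max, wP, wA):
+     # JC (Eq. 6): estimated travel time (Eq. 7) plus yaw-alignment time (Eq. 8).
+     jP = path_len / v_max
+     d_yaw = abs((vp.yaw - robot_yaw + math.pi) % (2 * math.pi) - math.pi)
+     jA = d_yaw / yaw_rate_max
+     return wP * jP + wA * jA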
404
+ C. Extension to Multi-Robot
405
+ 1) System Architecture: The proposed exploration strat-
406
+ egy can be easily extended to the multi-robot case. The
407
+ extended pipeline is shown in Fig. 4. Assuming that the
408
+ agents can localize in a common reference frame, they
409
+ exchange local sub-maps, as well as odometry informa-
410
+ tion, current target pose, and execution mode. Notice that
411
+ here we propose a decentralized architecture. Centralized
412
+ approaches generally assume infinite-range communication
413
+ between agents and with a ground station. However, standard
414
+ Wi-Fi communications have a limited range and, when
415
+ navigating in cluttered environments such as forests, com-
416
+ munication lines can be potentially obstructed and signal
417
+ can be lost. In this work, we propose to use a more flexible
418
+ point-to-point strategy, assuming that there exists a maximum
419
+ range of communication between each pair of agents. Our
420
+
421
+ [Figure 4 block diagram: Agents 1, ..., N each run Mapping (from Odometry and Depth), a Mode Selector and Path Planning; agents exchange execution Mode, Target Poses and Local Sub-maps]
438
+ Fig. 4: Overview of the pipeline in the multi-robot setting, where
439
+ the UAVs are tasked to collaboratively build a complete map of the
440
+ area of interest. To fulfill the objective, agents exchange odometry
441
+ information and local sub-maps, as well as current execution mode
442
+ and target poses. We assume there exists a maximum communi-
443
+ cation range between robots. If the distance between two agents
444
+ exceeds this limit, communication is lost and information is not
445
+ exchanged anymore, leading to poor coordination.
446
+ design aims to keep the agents within a valid communication
447
+ distance. If the distance between agents is higher than the
448
+ maximum range, communication is lost and information is
449
+ not exchanged anymore, leading to poor inter-agent coor-
450
+ dination and sub-optimal decision-making. We also assume
451
+ that, when communication is lost and successively regained,
452
+ agents can synchronize their maps.
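+ The link model reduces to a single predicate; the 50 m default below mirrors the range used later in the experiments and is otherwise an assumption:
+ import numpy as np
+ def connected(x_i, x_k, comm_range=50.0):
+     # Point-to-point link: data is exchanged only within comm_range;
+     # outside it, each agent falls back to the single-robot strategy.
+     return np.linalg.norm(x_i - x_k) <= comm_range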
453
+ 2) Multi-robot Coordination: We encourage coordination
454
+ between pairs of agents only if they perform compatible
455
+ actions from an exploration point of view. This implies that,
456
+ if two UAVs are operating in the same mode, they can
457
+ collaborate in order to either explore more unknown spaces
458
+ (Explorers) or clear trails (Collectors). On the contrary, coor-
459
+ dination between an Explorer and a Collector should not be
460
+ encouraged, since their tasks are intrinsically different. In this
461
+ situation, we propose a leader-follower paradigm, where the
462
+ Explorer (leader) explores unknown areas regardless of the
463
+ position of the second agent, while the Collector (follower)
464
+ follows the leader and clears the trails left unexplored. This
465
+ design choice allows more flexibility during the execution
466
+ of a mission, thanks to the possibility to change execution
467
+ modes online. Notice that, if communication between all
468
+ agents is lost, their exploration strategy falls back to the
469
+ single-robot case.
470
+ Our collaboration strategy within a robotic team is encour-
471
+ aged as a soft constraint by modifying the cost functions in
472
+ Eq. 1 and Eq. 6. If we consider robot i with position xR^i,
+ given the positions XR^i := {xR^k}, k = 0, ..., N−1, k ≠ i, of
+ the other N − 1 robots in the team, and their current target
+ positions GR := {xc∗^k}, we modify the cost functions JE and
+ JC for robot i as follows:
+ JE(ξc, XR^i, GR) := ωD JD(ξc) + ωV JV (ξc) + ωL JL(c) + ωF JF (ξc, XR^i, GR)
+ (9)
+ and
+ JC(ξc, XR^i, GR) := ωP JP (ξc) + ωA JA(ξc) + ωF JF (ξc, XR^i, GR),
+ (10)
494
+ where ωF is a constant weight. The cost function
+ JF (ξc, XR^i, GR) is defined as
+ JF (ξc, XR^i, GR) := JF^att(XR^i) + JF^rep(ξc, XR^i, GR),
+ (11)
513
+ where JF^att aims at keeping the agents i and k in com-
+ munication range, while JF^rep ensures a minimum distance
+ between them to avoid collisions. Moreover, JF^rep encourages
+ map splitting, by assigning a high cost to candidate target
+ positions close to other agents' current goals.
524
+ In more detail, the function JF^att for agent i is defined as
+ follows:
+ JF^att(XR^i) := Σ_{k=0, k≠i}^{N−1} I(i, k) · (1/2) kA ||xR^i − xR^k||^2,
+ (12)
539
+ where kA is a constant factor and I(i, k) is an indicator
+ function that embeds our coordination strategy:
+ I(i, k) := { 0 if i is an Explorer and k a Collector; 1 otherwise }.
+ (13)
549
+ This indicates that agent i is attracted toward agent k only if
550
+ they are in a compatible execution mode. On the contrary, in
551
+ leader-follower mode, i.e. when robot i is an Explorer and
552
+ robot k is a Collector, the leader ignores the follower, and
553
+ JF^att goes to zero. Notice that instead, if i is a Collector and
+ k an Explorer, I(i, k) = 1 and JF^att ≠ 0.
559
+ Instead, JF^rep is computed as follows:
+ JF^rep(ξc, XR^i, GR) := Σ_{k=0, k≠i}^{N−1} [ Jik^rep(xR^i, xR^k) + Jik^rep(xc^i, xc∗^k) ],
+ (14)
577
+ where, given the Euclidean distance dAB := ||xA − xB||2,
578
+ JAB^rep(xA, xB) := { kR (dc − d0)^2 (dc d0)/(d0 − dc)   if dAB ≤ dc
+                      kR (dAB − d0)^2                    if dc ≤ dAB ≤ d0
+                      0                                  otherwise }.
+ (15)
594
+ The parameter kR is a constant weight, while d0 represents
595
+ the minimum distance between positions A and B to have
596
+ a collision. The parameter dc represents the distance after
597
+ which the positions should not approach any closer. This
598
+ can be selected on the basis of the safety distance required
599
+ between the UAVs. Notice that Jik^rep is not influenced by
602
+ the roles of agents i and k, as safety and minimum distance
603
+ requirements need to be always met.
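+ A sketch of the two formation terms under the reconstruction above; note that the saturated branch of Eq. 15 was recovered from a garbled extraction and should be read as an approximation, and all interfaces are illustrative:
+ import numpy as np
+ def attraction_cost(x_i, teammates, k_att):
+     # JF^att (Eq. 12): quadratic pull toward each teammate k; the indicator
+     # I(i, k) of Eq. 13 (passed as I_ik) zeroes Explorer-to-Collector pulls.
+     return sum(I_ik * 0.5 * k_att * np.linalg.norm(x_i - x_k) ** 2
+                for x_k, I_ik in teammates)
+ def repulsion_term(d_ab, k_rep, d_c, d_0):
+     # One pairwise term of Eq. 15: saturated below the hard floor d_c,
+     # quadratic up to the influence radius d_0, zero beyond it.
+     if d_ab <= d_c:
+         return k_rep * (d_c - d_0) ** 2 * (d_c * d_0) / (d_0 - d_c)
+     if d_ab <= d_0:
+         return k_rep * (d_ab - d_0) ** 2
+     return 0.0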
604
+ IV. EXPERIMENTS
605
+ We evaluate the proposed exploration pipeline in both
606
+ single- and multi-UAV setups in simulation. In particular,
607
+ we benchmark our method on a series of realistic, randomly
608
+ generated forests of increasing tree densities [22], as well as
609
+ on a 3D reconstruction of a real forest [6].
610
+ In the single-agent setup, we compare the proposed
611
+ method against FUEL [5], while in the experiments with
612
+ multiple robots we test against a centralized strategy based
613
+ on map-splitting. In all tests, we use grid map resolutions of
614
+ 0.10 m or 0.15 m depending on the map size, while we set the
615
+ dynamic limits to vmax = 1.5 m/s and ˙γmax = 0.9 rad/s for
616
+ all planners. We simulate a depth camera with a fixed range
617
+
618
+ Metric | Ours | FUEL [5]
+ REAL FOREST
+ Completion Time [s] | 500.7 ± 14.8 | 757.7 ± 47.9
+ Travelled Distance [m] | 645.0 ± 20.0 | 533.2 ± 11.0
+ Velocity [m/s] | 1.3 ± 0.5 | 0.7 ± 0.4
+ SPARSE FOREST (0.05 TREES / m2)
+ Completion Time [s] | 665.4 ± 32.7 | 1114.1 ± 97.4
+ Travelled Distance [m] | 860.9 ± 34.0 | 758.1 ± 57.6
+ Velocity [m/s] | 1.3 ± 0.6 | 0.7 ± 0.5
+ AVERAGE-DENSITY FOREST (0.10 TREES / m2)
+ Completion Time [s] | 779.6 ± 110.9 | 954.1 ± 28.8
+ Travelled Distance [m] | 910.3 ± 63.7 | 713.5 ± 33.1
+ Velocity [m/s] | 1.2 ± 0.6 | 0.7 ± 0.5
+ DENSE FOREST (0.15 TREES / m2)
+ Completion Time [s] | 613.2 ± 16.2 | 1130.2 ± 28.8
+ Travelled Distance [m] | 789.7 ± 16.8 | 791.0 ± 37.7
+ Velocity [m/s] | 1.2 ± 0.6 | 0.7 ± 0.5
+ VERY DENSE FOREST (0.20 TREES / m2)
+ Completion Time [s] | 658.2 ± 57.2 | 904.1 ± 109.5
+ Travelled Distance [m] | 802.0 ± 52.7 | 680.6 ± 49.9
+ Velocity [m/s] | 1.2 ± 0.6 | 0.7 ± 0.5
670
+ TABLE I: Results of the experiments for a single agent. We report
671
+ the average completion time over 3 runs, as well as the average
672
+ travelled distance and velocity. The best performance is in bold.
673
+ The relatively high standard deviation in the timings to complete
674
+ the missions, in particular in the cases of tree densities of 0.10 and
675
+ 0.20 trees/m2, is caused by the complex nature of the map and the
676
+ large number of occlusions.
677
+ [Figure 5 plot: Explored Volume [m3] vs. Time [s], Ours vs. FUEL]
687
+ Fig. 5: The average exploration rate during the experiments in
688
+ the model REAL FOREST. The shaded region shows the standard
689
+ deviation. The proposed method reaches complete coverage in less
690
+ time than FUEL [5].
691
+ of 4.5 m using the Vulkan-based renderer of [23] and the
692
+ same physical simulator as in [5]. We report the planners’
693
+ performance in terms of the time needed to complete the
694
+ exploration of the scene, the total travelled distance, and the
695
+ average velocity of the UAVs during each experiment.
696
+ A. Single-robot Experiments
697
+ In the single-robot experiments, the models of the syn-
698
+ thetic forests are of size 50 m × 50 m × 2 m, while the 3D
699
+ reconstruction of the REAL FOREST has dimensions 40 m ×
700
+ 40 m×2 m. The map resolution is set to 0.10 m. As reported
701
+ in Table I, the proposed planner outperforms FUEL [5] across
702
+ all scenes in terms of the time taken to reach full coverage
703
+ (Fig. 5), thanks to our adaptive exploration policy that leads
704
+ [Figure 6 plot: Velocity [m/s] vs. Time [s], Ours vs. FUEL]
715
+ Fig. 6: The average UAV velocity during the experiments in the
716
+ REAL FOREST. The shaded region indicates the standard deviation.
717
+ The proposed strategy is able to fly the UAV at higher velocities
718
+ than FUEL leading to improved mission efficiency (i.e. time to
719
+ mission completion). Towards the end of the mission, the UAV
720
+ following the FUEL strategy also speeds up, as by then the map
721
+ is mostly explored, and smaller trails are cleared, resembling the
722
+ Collector mode in the proposed strategy.
723
+ Fig. 7: Top view of an exploration mission, where a team of two
724
+ UAVs is tasked to map a randomly generated forest with tree
725
+ density 0.05 trees/m2, illustrated with the black dots here. The
726
+ initial positions of the UAVs are denoted as colored blobs and the
727
+ final positions as enlarged drone models for clearer visualization.
728
+ The map is represented as a 2D occupancy grid obtained by slicing
729
+ the 3D model at 1.5 m from the ground.
730
+ to consistently higher UAV velocity throughout each mission,
731
+ as illustrated in Fig. 6. These results demonstrate the benefit
732
+ of using the proposed adaptive exploration strategy over
733
+ a fixed-mode method. However, notice that the proposed
734
+ design leads to longer travelled distances, albeit guaranteeing
735
+ that there are no small unexplored areas left. In fact, decision-
736
+ making both in Explorer and Collector modes is done on a
737
+ local-map level, and this may cause the UAV to fly longer
738
+ routes, deviating from the shortest path. Nonetheless, in the
739
+ proposed strategy we compensate for this shortcoming by
740
+ encouraging decisions leading to higher UAV velocities, and
741
+ thus shorter mission times.
742
+ B. Multi-robot Experiments
743
+ In the multi-robot experiments, the proposed planning
744
+ strategy is tested in a variety of models with fixed, homoge-
745
+ neous obstacle density, as well as in a randomly generated
746
+ forest with tree density varying across different regions of
747
+ the map.
748
+ 1) Maps with a fixed tree density: The results of the multi-
749
+ robot collaborative exploration strategy of maps with fixed
750
+ tree densities with two agents are shown in Table II, where
751
+
752
+ Ours
754
+ Split Map (FUEL [5])
755
+ SPARSE FOREST (0.05 TREES / m2)
756
+ Completion Time [s]
757
+ 780.3 ± 32.2
758
+ 834.3 ± 64.3
759
+ Travelled Distance [m]
760
+ 958.8 ± 9.2
761
+ 595.7 ± 51.8
762
+ 958.7 ± 60.4
763
+ Velocity [m/s]
764
+ 1.2 ± 0.6
765
+ 0.7 ± 0.4
766
+ 1.3 ± 0.7
767
+ AVERAGE-DENSITY FOREST (0.10 TREES / m2)
768
+ Completion Time [s]
769
+ 838.5 ± 64.4
770
+ 848.1 ± 98.5
771
+ Travelled Distance [m]
772
+ 915.3 ± 68.4
773
+ 616.1 ± 40.7
774
+ 906.2 ± 58.3
775
+ Velocity [m/s]
776
+ 1.2 ± 0.5
777
+ 0.8 ± 0.4
778
+ 1.1 ± 0.6
779
+ DENSE FOREST (0.15 TREES / m2)
780
+ Completion Time [s]
781
+ 786.3 ± 40.7
782
+ 754.0 ± 23.9
783
+ Travelled Distance [m]
784
+ 839.0 ± 26.1
785
+ 586.7 ± 14.2
786
+ 882.2 ± 32.0
787
+ Velocity [m/s]
788
+ 1.2 ± 0.7
789
+ 0.8 ± 0.4
790
+ 1.3 ± 0.7
791
+ VERY DENSE FOREST (0.20 TREES / m2)
792
+ Completion Time [s]
793
+ 803.7 ± 52.8
794
+ 705.8 ± 73.2
795
+ Travelled Distance [m]
796
+ 873.8 ± 47.0
797
+ 580.3 ± 73.3
798
+ 912.6 ± 42.7
799
+ Velocity [m/s]
800
+ 1.2 ± 0.7
801
+ 0.7 ± 0.5
802
+ 1.2 ± 0.8
803
+ TABLE II: Results in randomly generated forests with fixed tree
804
+ densities explored with two UAVs, averaged over 3 runs. For the
805
+ proposed strategy we report the average travelled distance and
806
+ velocity per agent. The best performance is highlighted in bold.
807
+ [Figure 8 plot: Explored Volume [m3] vs. Time [s], Agent 1 vs. Agent 2]
818
+ Fig. 8: The average exploration rate per agent with the proposed
819
+ approach using two UAVs during the experiments in a random
820
+ forest with density 0.10 trees/m2. The shaded region shows the
821
+ standard deviation. The explored volume is shown to be consistently
822
+ balanced across the two agents.
823
+ a maximum connection distance of 50 m for data exchange
824
+ between agents is assumed (Fig. 7). Here, experiments are
825
+ performed in forest models of size 100 m×50 m×2 m with
826
+ a map resolution of 0.10 m. The proposed approach is com-
827
+ pared against a centralized strategy we devised, employing
828
+ the FUEL planner [5] and assigning the UAVs to explore
829
+ maps of equal sizes (i.e. using map-splitting). Note that this
830
+ strategy assumes homogeneous forest maps, and knowledge
831
+ of the original map size, which renders this unsuitable for
832
+ realistic deployment unlike the proposed approach; however,
833
+ comparisons are presented for the sake of benchmarking.
834
+ The proposed strategy reaches comparable results with
835
+ respect to the centralized approach using FUEL. Similarly
836
+ to single-robot exploration, the proposed strategy flies the
837
+ UAVs at higher speeds, incurring longer travel distances.
838
+ Moreover, our strategy enables automatic load balancing of
839
+ the exploration mission, yielding similar exploration rates
840
+ per agent as illustrated in Fig. 8. However, in denser for-
841
+ est models, the performance of the proposed approach is
842
+ seen to degrade, as the UAVs are tasked to fly within a
843
+ connection range to each other, resulting in limited freedom
844
+ of movement. This is exacerbated by the increased number
845
+ of obstacles and occlusions in smaller maps, leading to
846
+ lower exploration rates. Nevertheless, as aforementioned, the
847
+ proposed approach is realistically deployable in contrast to
848
+ the baseline strategy using FUEL.
849
+ 2) Map with non-homogeneous tree density: The results
850
+ of the experiments in a map with non-homogeneous obstacle
851
+ densities with a team of two agents are shown in Table III.
852
+ We utilize a model with size 100 m × 200 m × 2 m, with
853
+ different tree densities (0.2, 0.3 and 0.5 trees/m2) across
854
+ distinct map regions. The occupancy grid map resolution is
855
+ set to 0.15 m, with a maximum inter-agent communication
856
+ range of 200 m. We perform the same analysis as in Sec.
857
IV-B.1, assuming a realistic maximum flight time of 1500 s for the UAVs. As none of the tested planners is able to fully explore the environment within the allowed time, we report the total team coverage at fixed timestamps. The proposed approach consistently outperforms the solution based on map-splitting with FUEL [5] as the planning back-end. The performance gain stems from the capacity of our strategy to fly the UAVs faster in regions of lower obstacle density and to explore more cluttered areas cautiously, clearing smaller frontier trails on the go. This leads to a more efficient exploration process that better exploits the capacity of UAVs to perform highly dynamic flights.

Finally, we report the performance of our method when a team of three robots is deployed. As shown in Table IV, a larger team size yields higher total coverage. This demonstrates that this work presents an effective strategy for peer-to-peer sharing of the responsibility of exploring a large forest area, and that the extension to larger teams of multiple UAVs can be realized following this paradigm, pushing towards scalable, decentralized multi-robot planning for exploration.

TABLE III: Results in a randomly generated forest with non-homogeneous tree densities when explored with two UAVs, averaged over 3 runs. We report the average travelled distance and velocity per agent at 1500 s, as well as the explored volume at different timestamps (one row per agent; the per-agent explored volumes sum to the team totals of Table IV). The best performance is highlighted in bold.

Metric                          Agent   Ours              Split Map (FUEL [5])
Travelled Distance [m]            1     1481.9 ± 467.0     735.2 ± 500.1
                                  2     1487.0 ± 440.8     697.1 ± 365.6
Velocity [m/s]                    1        1.3 ± 0.6         0.7 ± 0.4
                                  2        1.2 ± 0.7         0.6 ± 0.5
Explored Volume [m³], 300 s       1      542.3 ± 23.3      514.6 ± 313.0
                                  2      565.9 ± 31.4      632.0 ± 184.8
Explored Volume [m³], 600 s       1     1062.8 ± 81.3      840.0 ± 580.7
                                  2     1011.6 ± 65.3      728.3 ± 163.5
Explored Volume [m³], 900 s       1     1400.0 ± 265.3    1145.3 ± 814.7
                                  2     1302.6 ± 110.5     798.8 ± 146.2
Explored Volume [m³], 1200 s      1     1626.7 ± 431.2    1330.1 ± 905.3
                                  2     1466.5 ± 185.4     910.4 ± 83.9
Explored Volume [m³], 1500 s      1     1816.0 ± 550.8    1437.8 ± 971.8
                                  2     1637.0 ± 299.1    1040.5 ± 124.3

TABLE IV: Results in a randomly generated forest with varying tree densities across different regions of the model, averaged over 3 runs. Here, the map is explored with teams composed of two and three UAVs. We report the total volume covered by the team at different timestamps. The best performance is highlighted in bold.

Timestamp   Two Agents           Three Agents
300 s       1108.2 ± 54.7 m³     1208.0 ± 91.6 m³
600 s       2074.3 ± 144.8 m³    2098.4 ± 143.8 m³
900 s       2702.6 ± 375.9 m³    2748.3 ± 111.9 m³
1200 s      3093.2 ± 616.6 m³    3223.5 ± 127.9 m³
1500 s      3453.0 ± 849.9 m³    3621.8 ± 321.0 m³

V. CONCLUSION AND FUTURE WORK

In this work, we propose an exploration pipeline for autonomous UAVs operating in complex, cluttered environments, with a particular focus on forests. We choose this type of environment as one of the inherently most challenging for effective planning, due to the large number of obstacles and occlusions it exhibits. The proposed strategy allows each UAV to switch between different exploratory behaviors, autonomously balancing cautious exploration of unknown space with more aggressive maneuvers that exploit already-mapped space within a mission. This leads to faster completion times thanks to higher-speed flights and, consequently, to more efficient and faster map coverage than the state of the art. Moreover, we show how the proposed method can be extended to three, and potentially more, robots in a decentralized fashion, demonstrating automatic and effective load balancing across the participating agents. Following the push for automating higher-level decision-making in robotic missions, this work constitutes a key milestone towards effective exploration planning for robotic teams.

The natural next step for this work is the integration and deployment of the proposed pipeline onboard real platforms, while further investigations will focus on advancing coordination strategies in larger multi-robot teams.
REFERENCES

[1] M. Selin, M. Tiger, D. Duberg, F. Heintz, and P. Jensfelt, "Efficient Autonomous Exploration Planning of Large-Scale 3-D Environments," IEEE Robotics and Automation Letters, 2019.
[2] L. Schmid, M. Pantic, R. Khanna, L. Ott, R. Siegwart, and J. Nieto, "An Efficient Sampling-Based Method for Online Informative Path Planning in Unknown Environments," IEEE Robotics and Automation Letters, 2020.
[3] Y. Kompis, L. Bartolomei, R. Mascaro, L. Teixeira, and M. Chli, "Informed Sampling Exploration Path Planner for 3D Reconstruction of Large Scenes," IEEE Robotics and Automation Letters, 2021.
[4] T. Cieslewski, E. Kaufmann, and D. Scaramuzza, "Rapid exploration with multi-rotors: A frontier selection method for high speed flight," in 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2017.
[5] B. Zhou, Y. Zhang, X. Chen, and S. Shen, "FUEL: Fast UAV Exploration Using Incremental Frontier Structure and Hierarchical Planning," IEEE Robotics and Automation Letters, 2021.
[6] A. Ahmad, V. Walter, P. Petráček, M. Petrlík, T. Báča, D. Žaitlík, and M. Saska, "Autonomous Aerial Swarming in GNSS-denied Environments with High Obstacle Density," in 2021 IEEE International Conference on Robotics and Automation (ICRA), 2021.
[7] Y. Tian, K. Liu, K. Ok, L. Tran, D. Allen, N. Roy, and J. P. How, "Search and rescue under the forest canopy using multiple UAVs," The International Journal of Robotics Research, 2020.
[8] T. Rouček, M. Pecka, P. Čížek, T. Petříček, J. Bayer, V. Šalanský, D. Heřt, M. Petrlík, T. Báča, V. Spurný et al., "DARPA Subterranean Challenge: Multi-robotic exploration of underground environments," in International Conference on Modelling and Simulation for Autonomous Systems. Springer, 2019.
[9] L. Bartolomei, M. Karrer, and M. Chli, "Multi-robot Coordination with Agent-Server Architecture for Autonomous Navigation in Partially Unknown Environments," in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2020.
[10] M. Corah, C. O'Meadhra, K. Goel, and N. Michael, "Communication-efficient planning and mapping for multi-robot exploration in large environments," IEEE Robotics and Automation Letters, 2019.
[11] B. Yamauchi, "A frontier-based approach for autonomous exploration," in Proceedings 1997 IEEE International Symposium on Computational Intelligence in Robotics and Automation (CIRA'97), 1997.
[12] D. L. da Silva Lubanco, M. Pichler-Scheder, and T. Schlechter, "A novel frontier-based exploration algorithm for mobile robots," in 2020 6th International Conference on Mechatronics and Robotics Engineering (ICMRE), 2020.
[13] C. Connolly, "The determination of next best views," in Proceedings IEEE International Conference on Robotics and Automation, 1985.
[14] A. Bircher, M. Kamel, K. Alexis, H. Oleynikova, and R. Siegwart, "Receding Horizon 'Next-Best-View' Planner for 3D Exploration," in 2016 IEEE International Conference on Robotics and Automation (ICRA), 2016.
[15] B. Charrow, G. Kahn, S. Patil, S. Liu, K. Goldberg, P. Abbeel, N. Michael, and V. R. Kumar, "Information-theoretic planning with trajectory optimization for dense 3D mapping," Robotics: Science and Systems XI, 2015.
[16] A. Mannucci, S. Nardi, and L. Pallottino, "Autonomous 3D Exploration of Large Areas: A Cooperative Frontier-Based Approach," in Modelling and Simulation for Autonomous Systems. Cham: Springer International Publishing, 2018.
[17] R. G. Colares and L. Chaimowicz, "The next frontier: combining information gain and distance cost for decentralized multi-robot exploration," Proceedings of the 31st Annual ACM Symposium on Applied Computing, 2016.
[18] G. Hardouin, J. Moras, F. Morbidi, J. Marzat, and E. M. Mouaddib, "Next-Best-View planning for surface reconstruction of large-scale 3D environments with multiple UAVs," in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2020.
[19] A. Dutta, A. Bhattacharya, O. P. Kreidl, A. Ghosh, and P. Dasgupta, "Multi-robot informative path planning in unknown environments through continuous region partitioning," International Journal of Advanced Robotic Systems, 2020.
[20] D. Morilla-Cabello, L. Bartolomei, L. Teixeira, E. Montijano, and M. Chli, "Sweep-Your-Map: Efficient Coverage Planning for Aerial Teams in Large-Scale Environments," IEEE Robotics and Automation Letters, 2022.
[21] B. Zhou, F. Gao, L. Wang, C. Liu, and S. Shen, "Robust and Efficient Quadrotor Trajectory Generation for Fast Autonomous Flight," IEEE Robotics and Automation Letters, 2019.
[22] H. Oleynikova, M. Burri, Z. Taylor, J. Nieto, R. Siegwart, and E. Galceran, "Continuous-Time Trajectory Optimization for Online UAV Replanning," in IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2016.
[23] L. Bartolomei, Y. Kompis, L. Teixeira, and M. Chli, "Autonomous Emergency Landing for Multicopters using Deep Reinforcement Learning," in 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2022.
cNFAT4oBgHgl3EQfYB2G/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
ctAyT4oBgHgl3EQfjPgK/content/tmp_files/2301.00409v1.pdf.txt ADDED
@@ -0,0 +1,694 @@
Diffusion Model based Semi-supervised Learning on Brain Hemorrhage Images for Efficient Midline Shift Quantification

Shizhan Gong¹, Cheng Chen¹, Yuqi Gong¹, Nga Yan Chan², Wenao Ma¹, Calvin Hoi-Kwan Mak³, Jill Abrigo², Qi Dou¹

¹Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong, China
²Department of Imaging and Interventional Radiology, The Chinese University of Hong Kong, Hong Kong, China
³Queen Elizabeth Hospital, Hong Kong, China

arXiv:2301.00409v1 [cs.CV] 1 Jan 2023

Abstract. Brain midline shift (MLS) is one of the most critical factors to be considered for clinical diagnosis and treatment decision-making for intracranial hemorrhage. Existing computational methods for MLS quantification not only require intensive labeling in millimeter-level measurement but also suffer from poor performance due to their dependence on specific landmarks or simplified anatomical assumptions. In this paper, we propose a novel semi-supervised framework to accurately measure the scale of MLS from head CT scans. We formulate the MLS measurement task as a deformation estimation problem and solve it using a few MLS slices with sparse labels. Meanwhile, with the help of diffusion models, we are able to use a large number of unlabeled MLS data and 2793 non-MLS cases for representation learning and regularization. The extracted representation reflects how the image differs from a non-MLS image, and the regularization serves an important role in the sparse-to-dense refinement of the deformation field. Our experiment on a real clinical brain hemorrhage dataset has achieved state-of-the-art performance and can generate interpretable deformation fields.

Keywords: Computer-aided diagnosis · Semi-supervised learning · Diffusion models · Intracranial hemorrhage
1 Introduction

Intracranial hemorrhage (ICH) refers to brain bleeding within the skull, a serious medical emergency that can cause severe disability or even death [1]. A characteristic symptom of severe ICH is brain midline shift (MLS), the lateral displacement of midline cerebral structures (see Fig. 1). MLS is an important and quantifiable indicator of the severity of mass effects and the urgency of intervention [2,3,9]. For instance, the 5 millimeter (mm) threshold of MLS is frequently used to determine whether immediate intervention and close monitoring are required [4]. MLS quantification demands high accuracy and efficiency, which is difficult to achieve with manual quantification, especially in emergencies, due to the variability in shift regions, unclear landmark boundaries, and non-standard scanning poses. An automated algorithm that can immediately and accurately quantify MLS is therefore highly desirable to identify urgent patients for timely treatment.

[Fig. 1 shows four head CT examples: (a) no MLS; (b) MLS on the falx; (c) MLS on the septum pellucidum; (d) MLS on the third ventricle.]

Fig. 1: Examples of head CT scans to illustrate how radiologists measure MLS. The dashed red line connecting the anterior falx and posterior falx denotes a hypothetical normal midline. Blue circles denote the shifted landmarks. Perpendicular red lines from the shifted landmarks to the normal midline are measured as the MLS scale.

To measure MLS, clinicians usually first identify a few CT slices with large shifts, and then measure and identify the maximum deviation of landmarks such as the septum pellucidum, third ventricle, or falx from their normal counterpart as the final MLS distance (see examples in Fig. 1). Such a clinical fashion of MLS quantification is difficult to translate into a well-defined automation process. Currently, there are only limited studies on automated MLS quantification, using different strategies and varied labeling requirements. Nguyen et al. proposed a landmark-based method that relies on anatomical markers to determine the location of the deformed midline [9]. However, this method only applies to cases where MLS appears in these specific marker regions. Liao et al. adopted a symmetry-based method to seek a curve connecting all deformed structures [10], which is difficult to generalize due to over-simplified anatomical assumptions and sensitivity to patients' scan poses. A few recent works try to overcome these limitations by using stronger supervision with dense labeling. Some studies formulated MLS quantification as a midline segmentation task [5,6,7], delineating the intact midline as labels to supervise the training of segmentation models. Another study designed a hemisphere segmentation task to quantify MLS [8], which requires pixel-wise annotation for each slice. However, obtaining such dense annotations is very costly and time-consuming, and may not be necessary for MLS quantification.

To tackle these limitations, we propose to cast MLS quantification as a deformation prediction problem solved with semi-supervised learning (SSL) using only limited annotations. Our framework avoids the strong dependency on specific landmarks or over-simplified assumptions of previous methods while not increasing the labeling effort. We aim to use only sparse and weak labels as ground-truth supervision, namely one shifted landmark and its normal counterpart on a limited number of slices provided by radiologists, while fully exploiting the unlabeled slices and the non-MLS data to impose extra regularization for the sparse-to-dense extension. Existing SSL methods typically use a partially trained model to generate pseudo-labels for unlabeled data, assuming that labeled and unlabeled data are generally similar. These methods can be sub-optimal in our case, as labeled MLS slices usually present the largest deformation, while unlabeled slices contain only minor or no deformation. Instead, we propose an SSL strategy that generates a corresponding non-MLS image for each unlabeled MLS slice with generative models and regularizes the deformation field to warp the generated non-MLS images into the original MLS ones. However, since we only have volume-wise labels for MLS and non-MLS classification, it can be difficult to train a slice-wise discriminator as required by many generative models such as GANs [12]. Fortunately, the recently proposed diffusion models [15], which have proven to be powerful in both distribution learning and image generation without depending on discriminators, offer a potentially good solution.

In this work, we propose a novel semi-supervised learning framework based on diffusion models to quantify brain MLS from head CT images via deformation prediction. Our method effectively exploits supervision and regularization from all types of available data, including MLS images with sparse ground-truth labels, MLS images without labels, and non-MLS images. We validate our method on a real clinical head CT dataset, showing the effectiveness of each proposed component. Our contributions include: (1) innovating an effective deformation strategy for brain MLS quantification, (2) incorporating diffusion models as a representation learner to extract features reflecting where and how an MLS image differs from a non-MLS image, and (3) proposing a diffusion model-based semi-supervised framework that can effectively leverage massive unlabeled data to improve model performance.
2 Methods

Fig. 2 illustrates our diffusion model-based semi-supervised learning framework for MLS quantification via deformation prediction. In Sec. 2.1, we introduce our deformation prediction using only sparse supervision. In Sec. 2.2, we propose to incorporate non-MLS data for representation learning. In Sec. 2.3, we describe how to utilize unlabeled MLS images for sparse-to-dense regularization.

[Fig. 2 diagram: labeled image x_l and unlabeled image x_u pass through the deformation network, yielding fields φ_l and φ_u; a conditional diffusion network G_θc and an unconditional diffusion network G_θu provide the feature ε̂_θc − ε̂_θu and, by denoising with λG_θc − (1 − λ)G_θu, the generated negative image x′_u, which is warped to φ(x′_u); labeled data are supervised with l_huber + l_smooth and unlabeled data with l_mse + l_ceil.]

Fig. 2: The pipeline of our proposed semi-supervised deformation strategy for MLS quantification. The labeled image x_l is supervised by sparse labels, and the unlabeled image x_u is self-supervised with a generated negative image.

2.1 MLS Quantification through Deformation Estimation

Our proposed deformation strategy for brain MLS quantification aims to find an optimal deformation field φ such that an MLS image can be regarded as a hypothetically non-MLS image warped with this deformation field. The deformation field can be parameterized by a function with high complexity, so that it does not explicitly rely on a single landmark or on over-simplified symmetry assumptions, which naturally overcomes the limitations of existing methods. We apply a learning-based framework that parameterizes the deformation field with a U-Net-shaped neural network. The output of the network is the stationary velocity field v; the diffeomorphic deformation field φ is then calculated through integration of the velocity field, similarly to VoxelMorph [11] for image registration. The learning process is supervised by sparse deformation ground truth. For each labeled slice, we have the ground truth y = (y₁, y₂), a two-dimensional vector directing from the shifted landmark point toward its presumably normal location (the red arrow in Fig. 2). The predicted deformation ŷ, also a two-dimensional vector, is bilinearly interpolated from the deformation field at the shifted landmark point. To alleviate the influence of a few extremely large deformation points and increase the model's robustness, we use the Huber loss to measure the similarity between the predicted deformation and the label:

$$l_{huber}(y_d, \hat{y}_d) = \begin{cases} |y_d - \hat{y}_d|, & |y_d - \hat{y}_d| \ge c, \\[4pt] \dfrac{(y_d - \hat{y}_d)^2 + c^2}{2c}, & |y_d - \hat{y}_d| < c, \end{cases} \qquad (1)$$

where d ∈ {1, 2}. The hyperparameter c defines the range for absolute error or squared error. We also encourage a smooth deformation field with a diffusion regularizer on the spatial gradients of the deformation φ, to avoid a discontinuous deformation field:

$$l_{smooth} = \sum_j \sum_k \|\phi_{jk} - \phi_{(j-1)k}\|^2 + \|\phi_{jk} - \phi_{j(k-1)}\|^2 . \qquad (2)$$

As the deformation can be extremely large in our case, and to force a smooth transition between the deformation peak and its adjacent pixels, we use a coarse-to-fine approach, where velocity fields are generated through upsampling with skip connections to progressively aggregate features of different scales.
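To make the two supervised terms concrete, here is a minimal PyTorch sketch of Eqs. (1) and (2). The function names, tensor shapes, and the default c = 3 (the value quoted later in Sec. 3.2) are our illustrative choices, not the authors' released implementation.

```python
import torch

def huber_loss(y, y_hat, c=3.0):
    """Eq. (1), applied per component d in {1, 2}.

    y, y_hat: (..., 2) displacement vectors in pixels.
    Quadratic inside |error| < c, linear outside; the two branches agree at c.
    """
    err = (y - y_hat).abs()
    quad = (err ** 2 + c ** 2) / (2 * c)
    return torch.where(err >= c, err, quad).sum(dim=-1).mean()

def smoothness_loss(phi):
    """Eq. (2): squared finite differences of the dense deformation field.

    phi: (B, 2, H, W) deformation field.
    """
    dj = phi[:, :, 1:, :] - phi[:, :, :-1, :]   # difference along rows (j)
    dk = phi[:, :, :, 1:] - phi[:, :, :, :-1]   # difference along columns (k)
    return (dj ** 2).sum() + (dk ** 2).sum()
```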
2.2 Learning Negative Patterns from Non-MLS Images

In order to learn a deformation field that warps a non-MLS image into an MLS one, we would ideally need a pair of non-MLS and MLS images for network training, which does not exist in practice. Lacking such information makes the network difficult to train. A naive solution is to generate a corresponding non-MLS image. However, generated images entail some randomness and often lack important details; depending too much on such fake inputs can lead to poor robustness. Inspired by the score-matching interpretation of diffusion models [17], we instead propose to learn the non-MLS distribution from a massive amount of negative cases. Given an MLS image, we can then evaluate which parts of the image make it different from a non-MLS image. This deviation can serve as a latent feature that helps the deformation network with deformation prediction.

Diffusion models, especially DDPM [14], define a forward diffusion process as a Markov process progressively adding random Gaussian noise to a given image, and then approximate the reverse process by a Gaussian distribution. The forward process can be simplified by one-step sampling: $x_t = \sqrt{\alpha_t}\, x_0 + \sqrt{1-\alpha_t}\, \epsilon$, where $\alpha_t := \prod_{s=0}^{t} (1-\beta_s)$, the $\beta_t$ form a predefined variance schedule, and $\epsilon$ is sampled from $\mathcal{N}(0, I)$. The mean $\mu_\theta(x_t, t)$ and variance $\Sigma_\theta(x_t, t)$ of the reverse process can be parameterized by neural networks. A popular choice is to re-parameterize $\mu_\theta(x_t, t)$ so that $\hat\epsilon_\theta(x_t, t)$, rather than $\mu_\theta(x_t, t)$, is estimated by the network to approximate the noise $\epsilon$. Moreover, the output of the diffusion network $\hat\epsilon_\theta(x_t, t)$ is actually a scaled score function $\nabla \log p(x_t)$, as it moves the corrupted image in the direction opposite to the corruption [18].

As a result, by pre-training one unconditional diffusion model on all data (denoted U) and one conditional diffusion model on only non-MLS data (denoted C), the difference of the two outputs

$$\hat\epsilon_{\theta_U}(x_t, t) - \hat\epsilon_{\theta_C}(x_t, t) \;\propto\; \nabla \log p(x_t \mid n) - \nabla \log p(x_t) \;=\; \nabla \log p(n \mid x_t) \qquad (3)$$

can be regarded as the gradient of the class prediction (n = 1 for non-MLS and 0 otherwise) with respect to the input image, which reflects how the input image deviates from a non-MLS image. This latent contains information on how to transform the MLS-positive image into a non-MLS one and is therefore helpful for training the deformation network. Moreover, this feature representation exhibits less fluctuation with respect to the randomness of the additive noise, because the stochastic parts are eliminated through subtraction; it is more stable than the predicted noise or a generated MLS-negative image. For training, we randomly sample t from 0 to the number of diffusion steps T_train, while for inference we fix it to a certain value. We examine the effects of this value in Section 3.4.
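As a concrete illustration, the Eq. (3) feature can be computed with two frozen noise predictors and one forward-diffusion step. The sketch below uses our own naming (eps_uncond, eps_cond, alpha_bar) and assumes the networks take an image and an integer timestep; the fixed inference level t = 600 is the best-performing value reported later in Sec. 3.4.

```python
import torch

@torch.no_grad()
def score_difference_feature(x0, eps_uncond, eps_cond, alpha_bar, t=600):
    """Eq. (3): eps_U(x_t, t) - eps_C(x_t, t), a proxy for grad log p(n | x_t).

    x0: (B, 1, H, W) image; alpha_bar: 1-D tensor of cumulative prod(1 - beta_s).
    """
    eps = torch.randn_like(x0)
    a_t = alpha_bar[t]
    x_t = a_t.sqrt() * x0 + (1 - a_t).sqrt() * eps      # one-step forward sampling
    # The shared stochastic component cancels in the subtraction,
    # which is why this feature is more stable than either output alone.
    return eps_uncond(x_t, t) - eps_cond(x_t, t)
```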
2.3 Semi-Supervised Deformation Regularization

Deformation estimation is a dense prediction problem, while we only have sparse supervision. This can lead to flickering and poor generalizability if the deformation lacks regularization. On the other hand, we have a significant amount of unlabeled data from the MLS volumes that is potentially helpful. We therefore include these unlabeled data during training in a semi-supervised manner, so that they provide extra regularization and produce additional training examples based on noisy pseudo-labels. Many existing semi-supervised methods use the predictions for unlabeled data given by the same or a twin network as pseudo-labels and then supervise the model or impose regularization with them. However, these methods rest on the strong assumption that labeled and unlabeled data are drawn from the same distribution, which is not true in our case: most labeled data have large deformation, while unlabeled data have minor or no deformation. We therefore seek another type of pseudo-label that bypasses this distribution assumption. As the deformation field is assumed to warp a hypothetically normal image into an MLS one, we generate hypothetically non-MLS images x′₀ using the pre-trained diffusion models through classifier-free guidance [16]:

$$\hat\epsilon(x_t, t) = \gamma\, \hat\epsilon_{\theta_C}(x_t, t) + (1-\gamma)\, \hat\epsilon_{\theta_U}(x_t, t), \qquad (4)$$

where γ is a hyper-parameter controlling the strength of the condition. We warp x′₀ with the deformation field to obtain φ(x′₀) and measure its similarity to the original x₀ with an MSE loss. As it can be difficult for the generated image to be fully faithful to the original image, because the generative process entails a lot of random sampling, this l_mse can only serve as noisy supervision. Therefore, instead of generating x′₀ ahead of deformation-network training, we generate it in an ad-hoc way so that the noisy effects can be counteracted.

The final MLS measurement is estimated by calculating the length of the maximum displacement vector of the predicted deformation field, so it is more sensitive to over-estimation, and our results indeed show that most errors come from over-estimation. For unlabeled slices, we still have the prior that their MLS cannot exceed the MLS of that specific volume, δ; we therefore incorporate an additional ceiling loss to penalize over-estimation:

$$l_{ceil} = \sum_j \sum_k \max(0, \|\phi_{jk}\| - \delta). \qquad (5)$$

Overall, the loss is a combination of supervised and unsupervised terms, with weights controlling the relative importance of each term:

$$l = l_{huber} + w_1 l_{smooth} + u(i)\,(l_{mse} + w_2 l_{ceil}), \qquad (6)$$

where w₁ and w₂ are fixed weights and u(i) is a time-varying weight that gradually increases as training iteration i progresses, so that training first converges quickly through strong supervision and then refines and enhances generalizability via the unsupervised loss.
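A compact sketch of Eqs. (4)–(6) follows, again in PyTorch and under our own naming; the guidance scale gamma = 2 matches the value quoted in Sec. 3.2, and the DDIM sampling loop that would repeatedly call guided_eps is omitted for brevity.

```python
import torch

def guided_eps(x_t, t, eps_cond, eps_uncond, gamma=2.0):
    """Eq. (4): classifier-free guidance towards the non-MLS condition."""
    return gamma * eps_cond(x_t, t) + (1 - gamma) * eps_uncond(x_t, t)

def ceiling_loss(phi, delta):
    """Eq. (5): penalize displacements longer than the volume-level MLS delta.

    phi: (B, 2, H, W) deformation field; delta: scalar upper bound in pixels.
    """
    norm = phi.norm(dim=1)                       # (B, H, W) displacement lengths
    return torch.clamp(norm - delta, min=0).sum()

def total_loss(l_huber, l_smooth, l_mse, l_ceil, w1, w2, u_i):
    """Eq. (6): supervised terms plus ramped unsupervised terms (u_i grows with i)."""
    return l_huber + w1 * l_smooth + u_i * (l_mse + w2 * l_ceil)
```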
3 Experiments and Results

3.1 Data Acquisition and Preprocessing

We retrospectively collected anonymous thick-slice, non-contrast head CT of patients who were admitted with head trauma or stroke symptoms and diagnosed with various subtypes of intracranial hemorrhage, including epidural hemorrhage, subdural hemorrhage, subarachnoid hemorrhage, intraventricular hemorrhage, and intraparenchymal hemorrhage, between July 2019 and December 2019 in the Prince of Wales Hospital, a public hospital under the Hospital Authority of Hong Kong. Ethics approval was obtained from the Joint Chinese University of Hong Kong–New Territories East Cluster Clinical Research Ethics Committee. The eligible patients comprised 2793 CT volumes, among which 124 are MLS-positive cases. The MLS ranges between 2.24 mm and 20.12 mm, with a mean value of 8.34 mm and a median value of 8.73 mm. The annotation was performed by two trained physicians and verified by one experienced radiologist (with over 10 years of clinical experience in ICH). The labeling process followed the real clinical measurement pipeline: the shifted landmark, the anterior falx point, and the posterior falx point were marked, and the length of the perpendicular from the landmark to the line connecting the anterior and posterior falx points was the measured MLS value. For each volume, a few slices with large deformation were separately measured and annotated, and the largest shift served as the case-level label. On average, 4 out of 30 slices of each volume were labeled. We discarded the first 8 and the last 5 slices, as they mainly contain structures irrelevant to MLS. For preprocessing, we adjusted the pixel size of all images to 0.86 mm and then cropped or padded the resulting images to a resolution of 256 × 256 pixels. The HU window was set to 0 and 80. We applied intensity clipping (0.5 and 99.5 percentiles) and min-max normalization (between −1 and 1) to each image. Random rotation between −15° and 15° was used for data augmentation.
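As a concrete illustration of this preprocessing chain, the following numpy sketch (our own helper, with resampling to 0.86 mm pixel spacing assumed to have been done beforehand, and one possible ordering of the steps) applies the HU windowing, percentile clipping, normalization, and crop/pad described above.

```python
import numpy as np

def preprocess_slice(img_hu, size=256):
    """Hypothetical helper: img_hu is a 2-D float slice in Hounsfield units."""
    img = np.clip(img_hu, 0, 80)                        # HU window [0, 80]
    lo, hi = np.percentile(img, [0.5, 99.5])            # intensity clipping
    img = np.clip(img, lo, hi)
    img = 2 * (img - lo) / max(hi - lo, 1e-6) - 1       # min-max to [-1, 1]
    # center crop / pad to size x size, padding with the minimum value (-1)
    h, w = img.shape
    out = -np.ones((size, size), dtype=img.dtype)
    ys, xs = max((h - size) // 2, 0), max((w - size) // 2, 0)
    yo, xo = max((size - h) // 2, 0), max((size - w) // 2, 0)
    ch, cw = min(h, size), min(w, size)
    out[yo:yo + ch, xo:xo + cw] = img[ys:ys + ch, xs:xs + cw]
    return out
```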
3.2 Implementation Details

For the diffusion networks, we use the architecture designed in DDPM [15] and schedule the noise level linearly from 10⁻⁴ to 2 × 10⁻² with T_train = 1000. For non-MLS image generation, we apply the Denoising Diffusion Implicit Model (DDIM) [13] with 50 steps and set the noise scale to 15 to shorten the generation time. We set the hyper-parameters to α = 1, β = 1, c = 3, and γ = 2; u(i) is increased from 1 to 10 on a linear schedule. The diffusion models are trained with the AdamW optimizer, an initial learning rate of 1 × 10⁻⁴, and batch size 4, for 2 × 10⁵ iterations. We upsample the MLS-positive data by 10× when training the unconditional diffusion model. The deformation network is trained with the AdamW optimizer, an initial learning rate of 1 × 10⁻⁴, and batch size 16, for 100 epochs. All models are implemented with PyTorch 1.12.1 using one Nvidia GeForce RTX 3090 GPU.

Table 1: Comparison of different methods with 5-fold cross-validation.

                     Training data        Volume-wise            Slice-wise
Method              Labeled  Unlabeled   MAE↓(mm)  RMSE↓(mm)   MAE↓(mm)  RMSE↓(mm)
Regression             ✓        –          3.91      4.90        3.56      4.16
Deformation            ✓        –          3.80      4.47        2.51      3.17
Mean-Teacher [19]      ✓        ✓          2.89      3.67        2.43      3.22
CPS [20]               ✓        ✓          2.72      3.42        2.38      3.15
Ours                   ✓        ✓          2.43      3.17        2.25      3.09
3.3 Quantification Accuracy and Deformation Quality

We evaluate the performance of our quantification strategy through the mean absolute error (MAE) and the root mean square error (RMSE). For the volume-wise evaluation, we measure the maximum deformation of each slice of the whole volume and select the largest one as the final result. We also report a slice-wise evaluation, calculated on the labeled slices; this error reflects how the models perform on slices with relatively large deformation. Since existing MLS estimation methods require different types of labels from ours, a direct comparison with them is difficult. We therefore first compare our deformation-based strategy with a regression-based strategy, which uses DenseNet-121 [21] to directly predict the slice-wise MLS. We also compare our proposed semi-supervised learning approach with two popular semi-supervised learning methods, Mean-Teacher [19] and Cross Pseudo Supervision (CPS) [20], implemented within our deformation framework. The results, based on 5-fold cross-validation, are given in Table 1.

From the results, we can see that when using only labeled MLS slices for model learning, our deformation strategy already outperforms the regression model. This may be attributed to the fact that our deformation model learns both MLS values and locations, while a regression model only captures the MLS value; the difference is further enlarged in the slice-wise evaluation. Moreover, all three semi-supervised learning methods (Mean-Teacher, CPS, and ours) consistently improve deformation prediction, showing the benefits and importance of incorporating unlabeled data into model learning. Our diffusion model-based semi-supervised method achieves better quantification results than Mean-Teacher and CPS, significantly reducing the volume-wise MAE from 3.80 mm to 2.43 mm. An interesting observation is that the unlabeled data contribute more to the volume-wise evaluation than to the slice-wise evaluation. By inspecting the predictions, we find that the deformation model trained with labeled data tends to overestimate the deformation of slices with little or no deformation, which makes the volume-wise prediction error-prone. As most unlabeled data are slices with minor shifts, incorporating them in semi-supervised learning imposes constraints against large deformation, which greatly improves the model's robustness.

[Fig. 3 residue: panels with slice-wise true/predicted MLS pairs, e.g. true 11.00 / pred 11.43, true 5.43 / pred 6.02, true 6.23 / pred 6.78, true 7.48 / pred 8.87 for MLS images, and true 0.00 with predictions between 1.60 and 1.87 for non-MLS images.]

Fig. 3: Predicted deformation on (a) MLS images and (b) non-MLS images. The regions with the largest deformation are highlighted. Slice-wise predicted MLS and ground truth are provided.

We also visualize the predicted deformation fields of several sample cases. From Fig. 3 (a), we can see that the model correctly locates where the maximum shift appears and pushes it toward its hypothetically normal counterpart; the largest deformation occurs exactly at the site of the maximum shift. To validate the robustness of our model, we also select several patients diagnosed with no MLS and plot the predicted deformation for these samples. As can be seen in Fig. 3 (b), our method provides reasonable predictions for non-MLS images, outputting much smaller values than for MLS images. That our model's predictions for non-MLS images are not exactly zero is caused, on the one hand, by the fact that even for a completely healthy person the midline cannot be perfectly aligned, due to factors such as scan pose; on the other hand, our models tend to overestimate the shift because we take the maximum deformation as the final measurement.
3.4 Ablation Study

We conduct several ablation experiments to study how various components of our proposed framework affect model performance. The volume-wise results reported here are trained on four folds and tested on one fold.

Effects of representation learning. We first conduct ablation studies to verify that the latent feature extracted from the two diffusion models is truly useful for deformation prediction. To this end, we select two deformation models, one trained with only labeled data and the other with semi-supervised learning, and compare their performance with and without the extracted representation as input. The results are given in Table 2. As expected, incorporating the representation improves model performance in both cases.

Table 2: Effects of the representation.

Method              MAE↓ (mm)   RMSE↓ (mm)
Fully-supervised       3.61        4.47
 + Representation      3.22        3.69
Semi-supervised        2.61        3.24
 + Representation      2.45        3.05

Effects of the noise level. The noise level is an important component of diffusion models: only with a proper noise level can the model accurately estimate the deviation of the image toward the negative sample space. We therefore run inference with multiple noise levels and compare their effect on model performance. The results are shown in Fig. 4. Our model is very robust to this hyper-parameter: as long as t is not too small, the model gives very similar performance. The best performance appears in the middle, at t = 600. This is reasonable, as small noise fails to corrupt the original image, degenerating the score estimation, while large noise may obscure too many details of the original image.

[Fig. 4 residue: MAE and RMSE curves (roughly 2.6–3.4 mm) against the noise level t from 0 to 800.]

Fig. 4: Effects of the noise level.
+ we conduct ablation studies on the number of unlabeled images used. For each
568
+ experiment, we randomly sample 20%, 40%, 60%, and 80% volumes, and we
569
+ incorporate unlabeled slices of these volumes for semi-supervised training. For
570
+ the rest volumes, we are only using the labeled slices. We also do one experiment
571
+ that completely removes the uses of unlabeled images. For each experiment, the
572
+ pre-trained diffusion models are the same, which uses all the data. In other
573
+ words, these unlabeled images somehow still contribute to the model training.
574
+ The results are shown in Fig. 5 (a). As can be seen, the model performance
575
+ and robustness can be enhanced as we incorporate more unlabeled images. This
576
+
577
+ Diffusion model based ICH midline shift quantification
578
+ 11
579
+ (a)
580
+ (b)
581
+ Fig. 5: Results of our ablation experiments in terms of: (a) proportion of unla-
582
+ beled data used, and (b) proportion of negative data used.
583
+ provides strong evidence for our claim that our model truly learns valuable
584
+ information from unlabeled data.
585
+ Quantity of non-MLS images. To further measure the benefits of includ-
586
+ ing non-MLS cases, we conduct another ablation study on the proportion of
587
+ non-MLS cases. Non-MLS cases are used to train diffusion models. As currently,
588
+ the amount of non-MLS cases is much higher than MLS cases, we upsample the
589
+ MLS cases so that their quantities are approximately the same when training the
590
+ unconditional diffusion model. For ablation, we first downsample the non-MLS
591
+ data so that their quantity is 1×, 5×, and 10× that of the MLS cases, and then
592
+ upsample the MLS cases to make them balanced. From the results in Fig. 5 (b),
593
+ we find model performance improves with more non-MLS cases incorporated. In-
594
+ creasing non-MLS cases can help train diffusion models and further improve the
595
+ quality of generated images and extracted feature representations. However, this
596
+ effect will soon be saturated as the amount of MLS cases is relatively small. This
597
+ can be a bottleneck for effectively using the non-MLS cases as it is challenging
598
+ to train unconditional diffusion models with such imbalanced datasets.
599
+ 4
600
+ Conclusions and Future Work
601
+ In this paper, we propose a novel framework based on deformation field esti-
602
+ mation to automatically measure the brain MLS. The labels we are using are
603
+ sparse which can greatly alleviate the labeling workload. We also propose a
604
+ semi-supervised learning strategy based on diffusion models which significantly
605
+ improves the model performance. Experiments on a clinic dataset show our meth-
606
+ ods can achieve satisfying performance. We also verify that using unlabeled data
607
+ and non-MLS cases can truly help improve the model’s performance.
608
+ Our methods have several limitations. First, the model performance highly
609
+ relies on pre-trained diffusion models. Training diffusion models with extremely
610
+ imbalanced data requires great effort. Second, the measurement results exhibit
611
+ randomness due to noise corruption. Finally, the measurement results are prone
612
+ to overestimation. Our future work will figure out solutions for these limitations.
613
References

1. Caceres, J.A., Goldstein, J.N.: Intracranial hemorrhage. Emerg. Med. Clin. North Am. 30(3), 771 (2012)
2. Quattrocchi, K.B., et al.: Quantification of midline shift as a predictor of poor outcome following head injury. Surgical Neurology 35(3), 183–188 (1991)
3. Yang, W., Li, Q., Li, R., Liu, Q., Wang, X., et al.: Defining the optimal midline shift threshold to predict poor outcome in patients with supratentorial spontaneous intracerebral hemorrhage. Neurocritical Care 28(3), 314–321 (2018)
4. Liao, C., Chen, Y., Xiao, F.: Brain midline shift measurement and its automation: a review of techniques and algorithms. International Journal of Biomedical Imaging, vol. 4303161 (2018)
5. Pisov, M., et al.: Incorporating task-specific structural knowledge into CNNs for brain midline shift detection. In: Suzuki, K., et al. (eds.) ML-CDS/IMIMIC 2019. LNCS, vol. 11797, pp. 30–38. Springer, Cham (2019)
6. Wang, S., Liang, K., Li, Y., et al.: Context-aware refinement network incorporating structural connectivity prior for brain midline delineation. In: Martel, A.L., et al. (eds.) MICCAI 2020. LNCS, vol. 12267, pp. 208–217. Springer, Cham (2020)
7. Wei, H., et al.: Regression-based line detection network for delineation of largely deformed brain midline. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11766, pp. 839–847. Springer, Cham (2019)
8. Qin, C., Li, H., Liu, Y., Shang, H., Pei, H., Wang, X., Chen, Y., Chang, J., Feng, M., et al.: 3D brain midline delineation for hematoma patients. In: de Bruijne, M., et al. (eds.) MICCAI 2021. LNCS, vol. 12905, pp. 510–518. Springer, Cham (2021)
9. Nguyen, N.P., Yoo, Y., Chekkoury, A., Eibenberger, E., et al.: Brain midline shift detection and quantification by a cascaded deep network pipeline on non-contrast computed tomography scans. In: ICCVW 2021
10. Liao, C., Xiao, F., et al.: Automatic recognition of midline shift on brain CT images. Computers in Biology and Medicine 40, 331–339 (2010)
11. Balakrishnan, G., Zhao, A., Sabuncu, M.R., Guttag, J., Dalca, A.V.: VoxelMorph: a learning framework for deformable medical image registration. IEEE Transactions on Medical Imaging 38(8), 1788–1800 (2019)
12. Goodfellow, I.J., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. In: NeurIPS 2014
13. Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. In: ICLR 2020
14. Ho, J., et al.: Denoising diffusion probabilistic models. In: NeurIPS 2020
15. Dhariwal, P., et al.: Diffusion models beat GANs on image synthesis. In: NeurIPS 2021
16. Ho, J., Salimans, T.: Classifier-free diffusion guidance. In: NeurIPS 2021 Workshop on Deep Generative Models and Downstream Applications (2021)
17. Song, Y., Ermon, S.: Generative modeling by estimating gradients of the data distribution. In: NeurIPS 2019
18. Luo, C.: Understanding diffusion models: a unified perspective. arXiv preprint arXiv:2208.11970 (2022)
19. Tarvainen, A., Valpola, H.: Mean teachers are better role models: weight-averaged consistency targets improve semi-supervised deep learning results. In: NeurIPS 2017
20. Chen, X., Yuan, Y., Zeng, G., Wang, J.: Semi-supervised semantic segmentation with cross pseudo supervision. In: CVPR 2021
21. Huang, G., Liu, Z., van der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: CVPR 2017
ctAyT4oBgHgl3EQfjPgK/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
dtAzT4oBgHgl3EQf3f5y/content/tmp_files/2301.01830v1.pdf.txt ADDED
@@ -0,0 +1,590 @@
arXiv:2301.01830v1 [hep-ph] 4 Jan 2023

Search for an Ultraviolet Zero in the Seven-Loop Beta Function of the λφ⁴₄ Theory

Robert Shrock

C. N. Yang Institute for Theoretical Physics and Department of Physics and Astronomy, Stony Brook University, Stony Brook, NY 11794

We investigate whether the seven-loop beta function of the λφ⁴₄ theory exhibits evidence for an ultraviolet zero. In addition to a direct analysis of the beta function, we calculate and study Padé approximants and discuss effects of scheme transformations on the results. Confirming and extending our earlier studies of the five-loop and six-loop beta functions, we find that in the range of λ where the perturbative calculation of the seven-loop beta function is reliable, the theory does not exhibit evidence for an ultraviolet zero.
I. INTRODUCTION

In this paper we consider the renormalization-group (RG) behavior of the λφ⁴ field theory in d = 4 spacetime dimensions, where φ is a real scalar field. This theory, commonly denoted φ⁴₄, is described by the Lagrangian

$$\mathcal{L} = \frac{1}{2}(\partial_\nu \phi)(\partial^\nu \phi) - \frac{m^2}{2}\phi^2 - \frac{\lambda}{4!}\phi^4 . \qquad (1.1)$$

The Lagrangian (1.1) is invariant under the global discrete Z₂ symmetry φ → −φ. Quantum loop corrections lead to a dependence of the physical quartic coupling λ = λ(µ) on the Euclidean energy/momentum scale µ at which this coupling is measured. The dependence of λ(µ) on µ is described by the RG beta function of the theory, β_λ = dλ/dt, or equivalently β_a = da/dt, where dt = d ln µ [1] and

$$a \equiv \frac{\lambda}{(4\pi)^2} . \qquad (1.2)$$

(The argument µ will often be suppressed in the notation.) Since we will investigate the properties of the theory for large µ in the ultraviolet (UV), the value of m² will not play an important role in our analysis. For technical convenience, we assume that m² is positive. At a reference scale µ₀, the quartic coupling λ(µ₀) is taken to be positive for the stability of the theory. The one-loop term in this beta function has a positive coefficient, so that for small λ, β_λ > 0, and hence as µ → 0 the coupling λ(µ) → 0; i.e., the theory is infrared (IR)-free. This perturbative result is in agreement with nonperturbative approaches [2]; some reviews include [3, 4].

The beta function β_a has the series expansion

$$\beta_a = a \sum_{\ell=1}^{\infty} b_\ell\, a^\ell . \qquad (1.3)$$

The n-loop (nℓ) beta function, denoted β_{a,nℓ}, is given by Eq. (1.3) with the upper limit of the loop summation index ℓ = n instead of ℓ = ∞. The one-loop and two-loop terms in β_a are independent of the scheme used for regularization and renormalization, while terms of loop order ℓ ≥ 3 are scheme-dependent [5, 6]. For the O(N) λ|φ⃗|⁴ theory with an N-component field, φ⃗ = (φ₁, ..., φ_N), the coefficients b₁, b₂, and b₃ were calculated in [5]. Higher-loop coefficients b_ℓ with ℓ ≥ 3 have been computed using the MS minimal subtraction scheme [7, 8]. A calculation of b₅ and a discussion of earlier computations of b₄ and b₅ (e.g., [9–11]) was given in [4, 12]. The coefficient b₆ was calculated for N = 1 in [13] and for general N in [14]. Most recently, the seven-loop coefficient b₇ was calculated in [15]. In analyzing the series expansion (1.3), one recalls that it is an asymptotic expansion whose large-order behavior has been the subject of extensive study [16], including [17] and references therein.

An interesting question is whether, in the region of λ where a perturbative calculation of β_λ is reliable, this beta function exhibits evidence for a zero at some (positive) value of the quartic coupling. This would be an ultraviolet fixed point (UVFP) of the renormalization group; i.e., as µ → ∞, λ(µ) would approach this value (from below). In previous work we have investigated this question up to five-loop order for the O(N) λ|φ⃗|⁴ theory in [18] and up to six-loop order for the real λφ⁴ theory in [19] and the O(N) λ|φ⃗|⁴ theory in [20], finding evidence against such a UVFP. In the present paper, using the results of [15], we extend our analysis to the seven-loop level. Our analysis in [20] covered a large range of specific N values and also included an argument for the absence of a UV zero in the (rescaled) n-loop beta function at large N (see Eqs. (3.12)–(3.13) in [20]). Thus, it will suffice to focus on the N = 1 theory here.

In view of this previous evidence against a UV zero in β_λ and an associated UVFP in the O(N) λ|φ⃗|⁴ theory, it is worthwhile to mention one case where an IR-free quantum field theory is known to have a UVFP, namely the nonlinear O(N) σ model in d = 2 + ε spacetime dimensions. In this theory, an exact solution was obtained in the limit N → ∞ with λ(µ)N = x(µ) a fixed function of µ, yielding the beta function

$$\beta_x = \frac{dx}{dt} = \epsilon x \left(1 - \frac{x}{x_{UV}}\right) \qquad (1.4)$$

for small ε, where x_UV = 2πε is a UV fixed point of the renormalization group [21]. Since the leading term in β_x is positive for ε > 0, this theory is IR-free. Thus, in this nonlinear O(N) σ model in d = 2 + ε dimensions, the coupling x(µ) flows (monotonically) from x = 0 at µ = 0 to x = x_UV as µ → ∞. Note that by making ε ≪ 1 one can arrange that the UVFP at x_UV = 2πε occurs at an arbitrarily small value of the scaled coupling x.

This paper is organized as follows. In Section II we review some relevant background. In Section III we present the results of our analysis of the seven-loop beta function. Section IV contains a further analysis of this question of a UV zero using Padé approximants, while Section V discusses effects of scheme transformations. Our conclusions are given in Section VI.
II. BETA FUNCTION

The n-loop truncation of (1.3), denoted β_{a,nℓ}, is a polynomial in a of degree n + 1 having an overall factor of a². We may extract this factor and define a reduced beta function

$$\beta_{a,r} = \frac{\beta_a}{\beta_{a,1\ell}} = \frac{\beta_a}{b_1 a^2} = 1 + \frac{1}{b_1}\sum_{\ell=2}^{\infty} b_\ell\, a^{\ell-1} . \qquad (2.1)$$

The n-loop truncation of β_{a,r}, denoted β_{a,r,nℓ} ≡ R_n, is defined by taking the upper limit of the sum in (2.1) to be ℓ = n rather than ℓ = ∞.

The first two coefficients in the beta function of this theory are b₁ = 3 and b₂ = −17/3 [5]. The coefficients b_ℓ with 3 ≤ ℓ ≤ 7, and the resultant higher-loop beta function discussed below, are calculated in the MS scheme. The coefficients up to the five-loop level are [4, 5, 9, 12]

$$b_3 = \frac{145}{8} + 12\zeta_3 = 32.5497 , \qquad (2.2)$$

$$b_4 = -\frac{3499}{48} - 78\zeta_3 + 18\zeta_4 - 120\zeta_5 = -271.606 , \qquad (2.3)$$

and

$$b_5 = \frac{764621}{2304} + \frac{7965}{16}\zeta_3 - \frac{1189}{8}\zeta_4 + 987\zeta_5 + 45\zeta_3^2 - \frac{675}{2}\zeta_6 + 1323\zeta_7 = 2848.57 , \qquad (2.4)$$

where the floating-point values are given to the indicated accuracy and

$$\zeta_s = \sum_{n=1}^{\infty} \frac{1}{n^s} \qquad (2.5)$$

is the Riemann zeta function. If s = 2r is even, then ζ_s can be expressed as a rational number times π^{2r}, namely ζ_{2r} = (−1)^{r+1} B_{2r}(2π)^{2r}/[2(2r)!], where the B_n are the Bernoulli numbers; however, we leave these ζ_{2r} in their generic form here and below. The six-loop coefficient is [13, 14]

$$b_6 = -\frac{18841427}{11520} - \frac{779603}{240}\zeta_3 + \frac{16989}{16}\zeta_4 - \frac{63723}{10}\zeta_5 - \frac{8678}{5}\zeta_3^2 + \frac{6691}{2}\zeta_6 + 162\,\zeta_3\zeta_4 - \frac{63627}{5}\zeta_7 - 4704\,\zeta_3\zeta_5 + \frac{264543}{25}\zeta_8 - \frac{51984}{25}\zeta_{3,5} - 768\,\zeta_3^3 - \frac{46112}{3}\zeta_9 = -34776.13 , \qquad (2.6)$$

where [22]

$$\zeta_{3,5} = \sum_{m>n\ge 1} \frac{1}{n^3 m^5} . \qquad (2.7)$$

The seven-loop coefficient is considerably more complicated than b₆, and we refer the reader to [15] for the analytic expression. The numerical value is

$$b_7 = 474651.0 . \qquad (2.8)$$

Thus, in summary, the seven-loop beta function of the λφ⁴ theory (calculated in the MS scheme) is

$$\beta_{a,7\ell} = a^2\left( 3 - \frac{17}{3}a + 32.5497a^2 - 271.606a^3 + 2848.57a^4 - 34776.1a^5 + 474651a^6 \right) . \qquad (2.9)$$
III. ZEROS OF THE n-LOOP BETA FUNCTION UP TO LOOP ORDER n = 7

In this section we investigate a possible UV zero, denoted a_{UV,nℓ}, of the n-loop beta function β_{a,nℓ}. The double zero of β_{a,nℓ} at a = 0 is always present (independent of n); this is an infrared zero and hence will not be of interest here.

A necessary condition for there to be robust evidence for a UV zero in the beta function of an IR-free theory is that the values calculated at successive loop orders should be close to each other. Although the two-loop beta function β_{a,2ℓ} does have a UV zero, at a_{UV,2ℓ} = 9/17 = 0.52941, we found that the three-loop beta function β_{a,3ℓ} has no UV zero and, while a UV zero is present in β_{a,4ℓ}, it occurs at a considerably smaller value, namely a_{UV,4ℓ} = 0.23332. At the five-loop level, β_{a,5ℓ} has no UV zero, while at the six-loop level, although β_{a,6ℓ} has a UV zero, it occurs at a still smaller value, a_{UV,6ℓ} = 0.16041 [18, 19]. Thus, the results of this analysis show that the necessary condition that the beta function calculated to successively higher loop order should exhibit values of a_{UV,nℓ} close to each other is not satisfied by this theory. At seven-loop order, using β_{a,7ℓ} from [15], we find that this function has no physical UV zero. Instead, the zeros comprise three complex-conjugate pairs, −0.102135 ± 0.079848i, 0.0142348 ± 0.136854i, and 0.124533 ± 0.0659940i. Summarizing,

$$a_{UV,2\ell} = 0.52941, \quad a_{UV,4\ell} = 0.23332, \quad a_{UV,6\ell} = 0.16041, \quad \text{no } a_{UV,n\ell} \text{ for } n = 3, 5, 7. \qquad (3.1)$$

The calculations up to seven loops show a pattern: for even n = 2, 4, 6, β_{a,nℓ} has a zero a_{UV,nℓ}, but the values for different n are not close to each other, while for odd n = 1, 3, 5, 7, β_{a,nℓ} has no UV zero.

In Fig. 1 we plot the n-loop beta functions for 2 ≤ n ≤ 7 loops. Another way to show this information is via the n-loop reduced beta function β_{a,r,nℓ} = R_n, which we plot in Fig. 2 for 2 ≤ n ≤ 7. The results discussed above are evident in these figures. First, one may inquire how large the interval in a is over which the calculations of β_{a,nℓ} to the respective n-loop orders are in mutual agreement. As one can see from Figs. 1 and 2, the n-loop beta functions β_{a,nℓ} with 2 ≤ n ≤ 7 only agree with each other well over the small interval of couplings 0 ≤ a ≲ 0.05. As shown in Fig. 1, the β_{a,nℓ} with even n = 2, 4, 6 reach maxima and then decrease, crossing the (positive) real axis at the different values listed in Eq. (3.1), while the β_{a,nℓ} with odd n increase monotonically with a. This seven-loop analysis confirms and extends our conclusions at the six-loop level [19, 20] that the zero in the two-loop beta function of the λφ⁴ theory occurs at too large a value of a for the perturbative calculation to be reliable.
[FIG. 1: Plot of the n-loop beta function β_{a,nℓ} as a function of a for (i) n = 2 (red, solid), (ii) n = 3 (green, dashed), (iii) n = 4 (blue, dotted), (iv) n = 5 (black, dot-dashed), (v) n = 6 (cyan, solid), and (vi) n = 7 (brown, solid). At a = 0.16, going from bottom to top, the curves are for n = 6, n = 4, n = 2, n = 3, n = 5, n = 7.]

[FIG. 2: Plot of the ratio R_n of the n-loop beta function β_{a,nℓ} divided by β_{a,1ℓ}, as a function of a, for (i) n = 2 (red, solid), (ii) n = 3 (green, dashed), (iii) n = 4 (blue, dotted), (iv) n = 5 (black, dot-dashed), (v) n = 6 (cyan, solid), and (vi) n = 7 (brown, solid). At a = 0.16, going from bottom to top, the curves are for n = 6, n = 4, n = 2, n = 3, n = 5, and n = 7.]

IV. ANALYSIS WITH PADÉ APPROXIMANTS

One can gain further insight into the behavior of the beta function by the use of Padé approximants (PAs). We carried out this analysis up to the six-loop level in [19, 20], finding no indication of a physical UV zero, and here we extend it to the seven-loop level. Since the double zero in β_{a,nℓ} at a = 0 is not relevant to the question of a UV zero, we use the reduced beta function β_{a,r,nℓ} for this Padé analysis. The [p, q] Padé approximant to β_{a,r,nℓ} is the rational function [23]

[p, q]_{β_{a,r,nℓ}} = ( 1 + Σ_{j=1}^{p} r_j a^j ) / ( 1 + Σ_{k=1}^{q} s_k a^k ),    (4.1)

with p + q = n − 1, where the coefficients r_j and s_k are independent of a.
At seven-loop order, we can calculate the Padé approximants [p, q]_{β_{a,r,7ℓ}} with [p, q] taking on the values [6,0], [5,1], [4,2], [3,3], [2,4], [1,5], and [0,6]. Since the loop order is understood, we write [p, q]_{β_{a,r,7ℓ}} ≡ [p, q] for brevity of notation. The PA [6,0] is equivalent to β_{a,r,7ℓ} itself, which we have already analyzed, and the PA [0,6] has no zeros, so we focus here on the remaining five Padé approximants. We list our results for these Padé approximants to β_{a,r,7ℓ} below:

[5, 1] = (1 + 11.760 a − 14.931 a^2 + 57.552 a^3 − 286.17 a^4 + 1367.8 a^5) / (1 + 13.649 a) ,    (4.2)

[4, 2] = (1 + 20.541 a + 75.687 a^2 − 49.670 a^3 + 81.973 a^4) / (1 + 22.430 a + 107.21 a^2) ,    (4.3)

[3, 3] = (1 + 25.073 a + 152.81 a^2 + 155.99 a^3) / (1 + 26.962 a + 192.89 a^2 + 318.33 a^3) ,    (4.4)

[2, 4] = (1 + 22.314 a + 103.55 a^2) / (1 + 24.203 a + 138.42 a^2 + 89.390 a^3 − 91.252 a^4) ,    (4.5)

[1, 5] = (1 + 14.023 a) / (1 + 15.912 a + 19.205 a^2 − 45.828 a^3 + 196.10 a^4 − 910.03 a^5) .    (4.6)
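These approximants can be regenerated from the Taylor coefficients of β_{a,r,7ℓ}; a brief sketch follows (our addition; scipy may normalize the numerator and denominator polynomials by an overall constant relative to Eqs. (4.2)-(4.6), but the zeros and poles are unaffected):

    # Sketch: construct the [p, q] Pade approximants (4.2)-(4.6) from the series
    # coefficients of the reduced seven-loop beta function, then list zeros and poles.
    import numpy as np
    from scipy.interpolate import pade

    b1 = 3.0
    series = [1.0, -17.0 / 9.0, 32.5497 / b1, -271.606 / b1,
              2848.57 / b1, -34776.1 / b1, 474651.0 / b1]   # ascending powers of a

    for p, q in [(5, 1), (4, 2), (3, 3), (2, 4), (1, 5)]:
        num, den = pade(series, q, p)     # numerator / denominator as numpy poly1d
        print((p, q), "zeros:", np.roots(num.coeffs), "poles:", np.roots(den.coeffs))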
We recall some necessary requirements for a zero of a [p, q] Padé approximant to be physically relevant. These include the requirement that this zero should occur on the positive real axis in the complex a plane, and the requirement that this zero of the PA should be closer to the origin a = 0 than any pole on the positive real a axis, since otherwise the pole would dominate the IR-to-UV flow starting at the origin. If a Padé approximant were to exhibit such a zero, then one would proceed to inquire how close it is to any of the a_{UV,nℓ} in Eq. (3.1). However, we find that none of the Padé approximants (4.2)-(4.6) has a zero on the positive real a axis.

Explicitly, the [5,1] PA has two complex-conjugate pairs of zeros at a = −0.12719 ± 0.26046i and a = 0.26922 ± 0.20930i, together with a real zero at a = −0.074837. This real zero is part of a nearly coincident pole-zero pair, with the pole of the [5,1] PA being located at a = −0.073267. The appearance of a nearly coincident pole-zero pair close to a point a_0 in a [p, q] Padé approximant is typically an indication that the function that the PA is fitting has neither a pole nor a zero in the local neighborhood of a_0, since as the locations of the nearly coincident pole-zero pair approach each other, they simply divide out in the ratio (4.1). Each of the Padé approximants that we calculate here has such a pole-zero pair. The [4,2] PA has zeros at the complex-conjugate pair a = 0.42009 ± 0.96575i, together with the real values a = {−0.16929, −0.064970}, and poles at a = {−0.14481, −0.064414}. The [3,3] PA has zeros at a = {−0.78531, −0.13282, −0.061458} and poles at a = {−0.42342, −0.12140, −0.061112}. The [2,4] PA has zeros at a = {−0.15193, −0.063563} and poles at a = {−0.69186, −0.13432, −0.063100, 1.8689}. Finally, the [1,5] PA has a zero at a = −0.071313 and poles at a = {−0.22780, −0.070185, 0.44160, 0.035937 ± 0.39287i}. Thus, our analysis with Padé approximants of the seven-loop beta function yields the same conclusion as our analysis of the beta function itself, namely that there is no evidence for a stable, reliably perturbatively calculable UV zero up to this seven-loop level.
V. EFFECTS OF SCHEME TRANSFORMATIONS

Since the terms in the beta function at loop order n ≥ 3 are scheme-dependent, it is necessary to assess the effect of scheme transformations in an analysis of zeros of a higher-loop beta function. A scheme transformation can be expressed as a mapping between a and a transformed coupling a′,

a = a′ f(a′) ,    (5.1)

where f(a′) is the scheme transformation function. Since this transformation has no effect in the free theory, one has f(0) = 1. We consider f(a′) functions that are analytic about a = a′ = 0 and hence can be expanded in the form

f(a′) = 1 + Σ_{s=1}^{s_max} k_s (a′)^s ,    (5.2)

where the k_s are constants and s_max may be finite or infinite. The beta function in the transformed scheme, β_{a′} = da′/d ln µ, has the expansion

β_{a′} = a′ Σ_{ℓ=1}^{∞} b′_ℓ (a′)^ℓ .    (5.3)

In [24], formulas were derived for the b′_ℓ in terms of the b_ℓ and the k_s. In addition to b′_1 = b1 and b′_2 = b2, these are

b′_3 = b3 + k1 b2 + (k1^2 − k2) b1 ,    (5.4)

b′_4 = b4 + 2 k1 b3 + k1^2 b2 + (−2 k1^3 + 4 k1 k2 − 2 k3) b1 ,    (5.5)

and so forth for higher ℓ. These results are applicable to the study of both an IR zero in the beta function of an asymptotically free theory and a possible UV zero in the beta function of an IR-free theory. They were extensively applied to assess scheme dependence in higher-loop studies of an IR fixed point in asymptotically free non-Abelian gauge theories [24–28].
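Relations such as Eq. (5.4) can be rederived mechanically; the following sketch (our addition) expands β_{a′} = β_a/(da/da′) in powers of a′ with sympy and recovers b′_3:

    # Sketch: verify Eq. (5.4). With a = a' f(a') truncated at order (a')^3,
    # beta_{a'} = beta_a / (da/da'); the coefficient of (a')^4 must equal b'_3.
    import sympy as sp

    ap, k1, k2, b1, b2, b3 = sp.symbols("ap k1 k2 b1 b2 b3")
    a = ap * (1 + k1 * ap + k2 * ap**2)
    beta_a = b1 * a**2 + b2 * a**3 + b3 * a**4      # beta_a = a * sum_l b_l a^l, truncated
    beta_ap = sp.series(beta_a / sp.diff(a, ap), ap, 0, 5).removeO()
    bp3 = sp.expand(beta_ap).coeff(ap, 4)
    print(sp.simplify(bp3 - (b3 + k1 * b2 + (k1**2 - k2) * b1)))   # -> 0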
For the present λφ^4 theory, a study of scheme dependence was carried out in [18]. It was shown that even when one shifts to a scheme different from the usual MS scheme, the beta function still does not satisfy a requisite condition for a physical UV zero, namely that the value of this zero (in a given scheme) should not change strongly when it is calculated to successive loop orders. This result from [18] also holds in the same way in the present seven-loop context.

VI. CONCLUSIONS

In this paper we have investigated whether the real scalar field theory with a λφ^4 interaction exhibits evidence of an ultraviolet zero in the beta function. Using the seven-loop coefficient b7 from [15], our present study extends our previous six-loop study in [19, 20] to the seven-loop level. Our work includes a study of the seven-loop beta function itself, together with an analysis of Padé approximants. We conclude that, for the range of couplings where the perturbative calculation of this beta function may be reliable, it does not exhibit robust evidence for an ultraviolet zero.

Acknowledgments

I would like to thank Oliver Schnetz for valuable discussions on [15]. This research was supported in part by the U.S. National Science Foundation Grant NSF-PHY-22-15093.
[1] Some early studies on the renormalization group include E. C. G. Stueckelberg and A. Peterman, Helv. Phys. Acta 26, 499 (1953); M. Gell-Mann and F. Low, Phys. Rev. 95, 1300 (1954); N. N. Bogolubov and D. V. Shirkov, Doklad. Akad. Nauk SSSR 103, 391 (1955); C. G. Callan, Phys. Rev. D 2, 1541 (1970); K. Symanzik, Commun. Math. Phys. 18, 227 (1970); K. Wilson, Phys. Rev. D 3, 1818 (1971).
[2] Some early references include K. G. Wilson and J. Kogut, Phys. Repts. 12, 75 (1974); M. Aizenman, Commun. Math. Phys. 82, 69 (1982); B. Freedman, P. Smolensky, and D. Weingarten, Phys. Lett. B 113, 491 (1982); J. Fröhlich, Nucl. Phys. B 200, 281 (1982); R. F. Dashen and H. Neuberger, Phys. Rev. Lett. 50, 1897 (1983); C. B. Lang, Nucl. Phys. B 240, 577 (1984); J. Kuti, L. Lin, and Y. Shen, Phys. Rev. Lett. 61, 678 (1988); M. Lüscher and P. Weisz, Nucl. Phys. B 290, 25 (1987).
[3] J. Zinn-Justin, Quantum Field Theory and Critical Phenomena, 4th ed. (Oxford Univ. Press, Oxford, 2002).
[4] H. Kleinert and V. Schulte-Frohlinde, Critical Properties of φ^4 Theories (World Scientific, Singapore, 2001).
[5] E. Brézin, J. C. Le Guillou, and J. Zinn-Justin, Phys. Rev. D 9, 1121 (1974).
[6] D. J. Gross, in R. Balian and J. Zinn-Justin, eds., Methods in Field Theory, Les Houches 1975 (North Holland, Amsterdam, 1976), p. 141.
[7] G. 't Hooft, Nucl. Phys. B 61, 455 (1973).
[8] W. A. Bardeen, A. J. Buras, D. W. Duke, and T. Muta, Phys. Rev. D 18, 3998 (1978).
[9] A. A. Vladimirov, D. I. Kazakov, and O. V. Tarasov, Zh. Eksp. Teor. Fiz. 77, 1035 (1979) [Sov. Phys. JETP 50, 521 (1979)].
[10] K. G. Chetyrkin, S. G. Gorishny, S. A. Larin, and F. V. Tkachov, Phys. Lett. B 132, 351 (1983).
[11] S. G. Gorishny, S. A. Larin, and F. V. Tkachov, Phys. Lett. A 101, 120 (1984).
[12] H. Kleinert, J. Neu, V. Schulte-Frohlinde, K. G. Chetyrkin, and S. A. Larin, Phys. Lett. B 272, 39 (1991); Erratum: Phys. Lett. B 319, 545 (1993).
[13] M. V. Kompaniets and E. Panzer, PoS Loops and Legs LL2016, 038 (2016) [arXiv:1606.09210].
[14] M. V. Kompaniets and E. Panzer, Phys. Rev. D 96, 036016 (2017) [arXiv:1705.0648].
[15] O. Schnetz, Phys. Rev. D 97, 085018 (2021).
[16] L. N. Lipatov, Sov. Phys. JETP 45, 216 (1977) [Zh. Eksp. Teor. Fiz. 72, 411 (1977)]; E. Brézin, J. C. Le Guillou, and J. Zinn-Justin, Phys. Rev. D 15, 1544 (1977); G. Parisi, Phys. Lett. B 66, 167 (1977); M. C. Bergère and F. David, Phys. Lett. B 135, 412 (1984); J. C. Le Guillou and J. Zinn-Justin, eds., Large Order Behavior of Perturbation Theory (North-Holland, Amsterdam, 1990).
[17] G. V. Dunne and M. Meynig, Phys. Rev. D 105, 025019 (2022).
[18] R. Shrock, Phys. Rev. D 90, 065023 (2014).
[19] R. Shrock, Phys. Rev. D 94, 125026 (2016).
[20] R. Shrock, Phys. Rev. D 96, 056010 (2017).
[21] E. Brézin and J. Zinn-Justin, Phys. Rev. Lett. 36, 691 (1976); Phys. Rev. B 14, 3110 (1976); W. A. Bardeen, B. W. Lee, and R. E. Shrock, Phys. Rev. D 14, 985 (1976); see also A. Polyakov, Phys. Lett. B 59, 79 (1975).
[22] See, e.g., J. Blümlein, D. J. Broadhurst, and J. A. M. Vermaseren, Comput. Phys. Commun. 181, 582 (2010).
[23] G. A. Baker, Essentials of Padé Approximants (Academic Press, New York, 1975).
[24] T. A. Ryttov and R. Shrock, Phys. Rev. D 86, 065032 (2012); Phys. Rev. D 86, 085005 (2012).
[25] R. Shrock, Phys. Rev. D 88, 036003 (2013); Phys. Rev. D 90, 045011 (2014).
[26] T. A. Ryttov, Phys. Rev. D 89, 016013 (2014); Phys. Rev. D 89, 056001 (2014); Phys. Rev. D 90, 056007 (2014).
[27] G. Choi and R. Shrock, Phys. Rev. D 90, 125029 (2014).
[28] J. A. Gracey and R. M. Simms, Phys. Rev. D 91, 085037 (2015).
+
dtAzT4oBgHgl3EQf3f5y/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
ftAzT4oBgHgl3EQfof1Y/content/tmp_files/2301.01597v1.pdf.txt ADDED
@@ -0,0 +1,2796 @@
Demystify Problem-Dependent Power of Quantum Neural Networks on Multi-Class Classification

Yuxuan Du,1,* Yibo Yang,1 Dacheng Tao,1 and Min-Hsiu Hsieh2
1JD Explore Academy, Beijing 10010, China
2Hon Hai (Foxconn) Research Institute, Taipei, Taiwan
(arXiv:2301.01597v1 [quant-ph], 29 Dec 2022)

Quantum neural networks (QNNs) have become an important tool for understanding the physical world, but their advantages and limitations are not fully understood. Some QNNs with specific encoding methods can be efficiently simulated by classical surrogates, while others with quantum memory may perform better than classical classifiers. Here we systematically investigate the problem-dependent power of quantum neural classifiers (QCs) on multi-class classification tasks. Through the analysis of expected risk, a measure that weighs the training loss and the generalization error of a classifier jointly, we identify two key findings: first, the training loss dominates the power rather than the generalization ability; second, QCs undergo a U-shaped risk curve, in contrast to the double-descent risk curve of deep neural classifiers. We also reveal the intrinsic connection between optimal QCs and the Helstrom bound and the equiangular tight frame. Using these findings, we propose a method that uses loss dynamics to probe whether a QC may be more effective than a classical classifier on a particular learning task. Numerical results demonstrate the effectiveness of our approach to explain the superiority of QCs over multilayer Perceptrons on parity datasets and their limitations over convolutional neural networks on image datasets. Our work sheds light on the problem-dependent power of QNNs and offers a practical tool for evaluating their potential merit.

I. INTRODUCTION

The advent of hardware fabrication pushes the boundary of quantum computing from verifying its superiority on artificial tasks [1-3] to conquering realistic problems with merits [4-6]. This has led to the emergence of a popular paradigm known as quantum neural networks (QNNs), which combine variational quantum Ansätze with classical optimizers [7, 8]. So far, various QNN-based methods have been proposed to address difficult problems in areas such as quantum physics [9-12], quantum information theory [13-16], combinatorial optimization [17-21], and machine learning [22-26]. Among these applications, QNNs are often deployed as quantum classifiers (QCs) to predict the correct labels of input data [27-32], e.g., to categorize image objects [33-35], classify phases of quantum matter [36-39], and distinguish entangled states from separable states [40, 41].

To comprehend the full potential of existing QCs and to spur the development of novel QCs, huge efforts have been made to unveil the learnability of QCs [42-44]. Prior literature establishes the foundations of QCs from three primary aspects, i.e., model capacity [45-48], trainability [49-51], and generalization [52-57]. Nevertheless, the advantages and constraints of QCs have rarely been proven [57-62]. Meanwhile, previous results cannot rigorously explain the empirical observations that QCs generally outperform classical classifiers (CCs) on handcrafted or quantum data [44, 63] but are inferior to them on realistic problems [64]. As a result, the need for QCs to address classical problems remains highly questionable.

A principal criterion for characterizing the power of a classifier is the expected risk [65], which weighs the empirical risk (i.e., training loss) and the generalization error (i.e., test loss) jointly. An optimal classifier is one which achieves zero expected risk. As shown in Fig. 1(a), the success of deep neural classifiers is attributed to their double-descent risk curves [66, 67]. This means that as the hypothesis space is continually expanded, the expected risk of a trained deep neural classifier initially decreases, then increases, and, once it overfits the train set, undergoes a second descent. As such, to show the superiority of QCs over CCs, one must distill ubiquitous rules that capture the risk curve of diverse QCs, in addition to conditions under which the expected risk of QCs can be lower than that of CCs.

In this study, we unify a broad class of QCs in the same framework and understand their problem-dependent ability under the expected risk (see Fig. 1(b)). Our analysis reveals two substantial outcomes: (i) trainability dominates QCs' ability more than generalization ability; (ii) QCs undergo a U-shaped risk curve instead of the double-descent curve of CCs. These outcomes consolidate and refine previous observations. Concretely, the first outcome suggests that the deficiency of QCs on classical data stems from their limited ability to fit the train set, resulting in a larger training loss compared to CCs. The second outcome highlights the distinct learning behavior of QCs and CCs. Despite the fact that over-parameterization is fundamental to enhancing the performance of CCs, it adversely affects the power of QCs. In line with the diverse dynamics of the risk curves for QCs and CCs, we devise an efficient problem-dependent method to recognize potential merits of QCs, as shown in Fig. 1(a). Conceptually, for a given learning task, our method fits the loss (risk) dynamics of the QC and the CC under the prior (i.e., U-shape versus double descent) and then identifies the 'advantage' regime where the risk of the QC is lower than that of the CC. Numerical simulations are conducted to support our theoretical results.

[FIG. 1. Risk curve and geometry of the unified QCs. (a) The risk curves of QCs and CCs are highlighted by the solid red and blue lines (labeled 'Q-R' and 'C-R'), respectively. The former yields a 'U' shape while the latter yields a double-descent tendency. Potential advantages of QCs are dominated by the empirical risk, highlighted by the dashed curve. The shaded region refers to the potential merits of QCs. (b) The unified QC consists of two parts, the feature state ρ and the measure operator o. This model covers diverse QCs. (c) Geometric relationship between {ρ^(i,k)} and o of QCs with (near) zero training loss: (i) the feature states associated with train samples belonging to the same class concentrate around their class-feature mean, i.e., ρ̄*(k) := ρ*(1,k) = ... = ρ*(nc,k) for ∀k ∈ [K]; (ii) the class-feature means are maximally distant from each other, i.e., Tr(ρ̄*(k) ρ̄*(k′)) ∼ δ_{k,k′}; (iii) the measure operator should align with the class-feature means, i.e., Tr(ρ̄*(k) o*(k′)) ∼ δ_{k,k′}.]

On the technical level, we approach the two outcomes by separately quantifying the empirical risk and the generalization error of QCs. Specifically, we first prove conditions of QCs that lead to near-zero empirical risk, the geometric interpretation of which is depicted in Fig. 1(c). As a byproduct, we elucidate how such conditions are inherently linked to quantum state discrimination and quantum measurement theory. In addition, we prove that deep QCs can never reach vanishing empirical risk, by utilizing the concentration property of quantum observables [68, 69]. We next analyze the generalization error of QCs by exploiting algorithmic robustness [70]. The derived bound surpasses prior results because it is the first non-vacuous bound in the over-parameterized regime. By combining the unreachable zero empirical risk with the controllable generalization error, we obtain the first outcome. The second outcome is gained by integrating the fact that deep QCs are unable to reach vanishing empirical risk with the first outcome.

II. MAIN RESULTS

Expected risk.— Let us first introduce a K-class (K ≥ 2) classification task. Denote the input space as X, the label (class) space as Y = {1, ..., K}, and the train set as D = ∪_{k=1}^{K} {(x^(i,k), y^(i,k))}_{i=1}^{n_k}, with |D| samples drawn i.i.d. from an unknown probability distribution D on Z = X × Y. In standard scenarios, the number of train samples in each class is the same, i.e., n_1 = ... = n_K ≡ n_c and |D| := n = K n_c. The purpose of a classification algorithm A is to use D to infer a hypothesis (a.k.a. a classifier) h_{A_D} : X → R^K from the hypothesis space H that separates train examples from different classes. This is equivalent to identifying an optimal hypothesis in H minimizing the expected risk R(h) = E_{(x,y)∼D}[ℓ(h(x), y)], where ℓ(·,·) is the per-sample loss, which for clarity we specify as the square error ℓ(a, b) = (1/2) ||a − b||_2^2 [71]. Unfortunately, the inaccessible distribution D forbids us from assessing the expected risk directly. In practice, A alternatively learns an empirical classifier ĥ ∈ H as the global minimizer of the (regularized) loss function

L(h, D) = (1/n) Σ_{i=1}^{n_c} Σ_{k=1}^{K} ℓ(h(x^(i,k)), y^(i,k)) + E(h),    (1)

where E(h) is an optional regularizer.

The foremost role of the risk means that quantum advantages can be ascertained if R(ĥ_Q) < R(ĥ_C), where ĥ_Q and ĥ_C are the empirical QC and CC on D. Unlike conventions merely focusing on a single QC on one specific task, the above criterion is oriented toward unearthing ubiquitous rules for QCs with computational advantages. To reconcile the intractable evaluation of R(ĥ) and proceed with the following analysis, we decompose it into two measurable terms, i.e.,

R(ĥ) = R_ERM(ĥ) + R_Gene(ĥ),    (2)

where R_ERM(ĥ) = (1/n) Σ_{i=1}^{n_c} Σ_{k=1}^{K} ℓ(ĥ(x^(i,k)), y^(i,k)) is the empirical risk and R_Gene(ĥ) = R(ĥ) − R_ERM(ĥ) is the generalization error. Based on Eq. (2), detecting advantages of QCs translates into deriving under what conditions QCs attain both lower R_ERM and lower R_Gene than CCs.
212
+ general form of QC is ˆhQ = arg minhQ∈HQ L(hQ, D),
213
+
214
+ Learning tasks
215
+ Advantages
216
+ No Advantages介83
217
+ where L is defined in Eq. (1) and HQ is the hypothe-
218
+ sis space. For an N-qubit QC, its hypothesis space is
219
+ HQ =
220
+ ��
221
+ hQ(·, U(θ), O(k))
222
+
223
+ k=1:K
224
+ ���θ ∈ Θ
225
+
226
+ ,
227
+ (3)
228
+ where [·]k=1:K is a K-dimensional vector, its k-th entry
229
+ hQ(x, U(θ), O(k)) = Tr(O(k)U(θ)σ(x)U(θ)†) for ∀k ∈
230
+ [K] refers to the output (prediction) of quantum cir-
231
+ cuits, σ(x) = UE(x)(|0⟩ ⟨0|)⊗NUE(x)† is the input state
232
+ of x with the encoding circuit UE(·), O = {O(k)}K
233
+ k=1
234
+ is a set of measure operators, and U(θ) is the adopted
235
+ Ansatz with trainable parameters θ living in the pa-
236
+ rameter space Θ. Without loss of generality, we define
237
+ U(θ) = �Nt
238
+ l=1(ul(θ)ue) ∈ U(2N), where ul(θ) ∈ U(2m)
239
+ is the l-th parameterized quantum gate operated with at
240
+ most m qubits (m ≤ N) and ue refers to fixed quan-
241
+ tum gates. Similarly, we define UE(x) = �Ng
242
+ g=1 ug(x) ∈
243
+ U(2N), where ug(x) ∈ U(2m) refers to the g-th quan-
244
+ tum gate operated with at most m qubits, and Ng gates
245
+ contain Nge tunable gates and Ng − Nge fixed gates.
246
+ Due to the diverse constructions of U(θ) and UE(·), it
247
+ is necessary to unify various QCs into the same frame-
248
+ work to obtain the generic results. Notably, the unified
249
+ QC should be agnostic to particular forms of these two
250
+ terms. A feasible way is rewritten hQ(·, U(θ), O(k)) as
251
+ hQ(ρ(i,k), o(k)) := Tr(ρ(i,k)o(k)) ∀k ∈ [K],
252
+ (4)
253
+ where O(k) = I2N−D ⊗o(k) with the nontrivial local oper-
254
+ ator o(k) ∈ C2D×2D, D describes the locality, and ρ(i,k) =
255
+ TrD(U(θ)σ(x(i,k))U(θ)†) corresponds to the state before
256
+ measurements, named as feature state. An intuition of
257
+ the unified QC is shown in Fig. 1(b).
258
+ We are now ready to exploit the unified framework
259
+ to analyze the expected risk of QCs. Let ρ = {ρ(i,k)}
260
+ and o = {o(k)} be two sets collecting all feature states
261
+ and measure operators. The following theorem exhibits
262
+ conditions in which QCs allow a low expected risk, where
263
+ the formal statement and the proof are deferred to SM A.
264
+ Theorem 1 (informal). Following notations in Eqs. (1)-
265
+ (4), when the train data size is nO(KNge log KNg
266
+ ϵδ ) with ϵ
267
+ being the tolerable error, and the optimal sets of ρ∗ and
268
+ o∗ satisfy three conditions: (i) the feature states have
269
+ the vanished variability in the same class; (ii) all feature
270
+ states are equal length and are orthogonal in the varied
271
+ classes; (iii) any feature state is alignment with the mea-
272
+ sure operator in the same class, with probability 1−δ, the
273
+ expected risk of QC tends to be zero, i.e., R(ˆhQ) → 0.
274
+ Conditions (i)-(iii) visualized in Fig. 1(c) sculpt the ge-
275
+ ometric interpretations of ρ∗ and o∗. These properties
276
+ come across the design philosophy of CCs, e.g., linear dis-
277
+ criminant analysis and neural collapse phenomenon ap-
278
+ peared in most deep neural classifiers [71–73]. Moreover,
279
+ these conditions unveil the intrinsic connection between
280
+ optimal QCs and the quantum state discrimination [74],
281
+ since ρ∗ and o∗ should maximize the Helstrom bound
282
+ [75], which explains the ultimate limit of QCs observed
283
+ in [76]. However, as will be explained later (see Corol-
284
+ lary 1 and Lemma 1), under certain scenarios, it is hard
285
+ for QCs to meet these conditions. A typical instance is
286
+ applying QC to learn the image dataset, where the dif-
287
+ ficulty stems from the limited nonlinearity of QC to fit
288
+ the train set, thereby inducing a large empirical risk.
289
+ Conditions (i)-(iii) also imply how the quantum mea-
290
+ surement theory can be used to guide the design of QCs.
291
+ Namely, the mean feature states of each class {¯ρ∗(k)}
292
+ compose the equiangular tight frame (ETF) and Con-
293
+ dition (iii) suggests that the optimal measure operators
294
+ {o∗} also satisfy this ETF [77]. Due to the relation be-
295
+ tween symmetric informationally complete (SIC) mea-
296
+ surements and ETF [78, 79], the optimal measure op-
297
+ erators could be estimated by various SIC construction
298
+ strategies [80].
299
+ Besides, the locality D of {o∗} should
300
+ be carefully selected in QCs in which a small D pre-
301
+ cludes the acquisition of the optimal QCs (i.e., the com-
302
+ plex ETF does not exist when 2D = K [81, 82]), while an
303
+ extremely large D may incur the barren plateaus [83, 84].
304
+ Furthermore, when K is large, Pauli-based measurements
305
+ are preferable than computational basis measurements in
306
+ QCs, since the former allows classical shadow techniques
307
+ to accelerate the training of QCs [85, 86].
308
+ The scaling behavior of n indicates that it is data-
309
+ efficient for QCs to attain a low generalization error,
310
+ where the size of train set only linearly depends on the
311
+ class number K and the number of encoding gates Nge
312
+ (see Lemma 3 for the technical elaboration).
313
+ In other
314
+ words, the generalization error of QCs can be well con-
315
+ trolled by the modest-size train data.
316
+ According to Theorem 1, the challenges in satisfying
317
+ Conditions (i)-(iii) and the well controlled generalization
318
+ error pinpoint that the risk of a QC is mostly dominated
319
+ by its empirical loss rather than its generalization error.
320
+ In this view, the core in devising advanced QCs is tailor-
321
+ ing HQ in Eq. (3) so that ˆhQ has a (near) zero empirical
322
+ risk on D, or equivalently examining whether the em-
323
+ ployed QCs can fulfill Conditions (i)-(iii).
324
+ U-shape risk curve.—The risk curve concerns how
325
+ the expected risk of a classifier behaves with the varied
326
+ hypothesis space. It is desired that as with deep neu-
327
+ ral classifiers, QCs undergo a double-descent risk curve
328
+ in the sense that the over-parameterized QCs consent
329
+ a low expected risk when the trainable parameters Nt
330
+ is much greater than the train data n.
331
+ If so, ‘over-
332
+ parameterization’ could serve as a golden law in designing
333
+ QCs. However, the following corollary refutes the exis-
334
+ tence of the double-descent risk curve for QCs.
335
+ Corollary
336
+ 1.
337
+ Following
338
+ notations
339
+ in
340
+ Theorem
341
+ 1,
342
+ when {UE(x)|x
343
+
344
+ X} follows the Haar distribu-
345
+ tion, with probability 1 − δ, the empirical QC follows
346
+ | Tr
347
+
348
+ σ(x(i,k))σ(x)
349
+
350
+ − 1
351
+ 2N | ≤
352
+
353
+ 3
354
+ 22Nδ. When {U(θ)|θ ∈ Θ}
355
+ follows the Haar distribution, with probability 1 − δ,
356
+ the empirical QC follows | Tr(ρ(i,k)o(k′)) − Tr(o(k′))
357
+ 2D
358
+ | <
359
+
360
+ 4
361
+
362
+ Tr(o(k′))2+2 Tr((o(k′))2)
363
+ 22Dδ
364
+ .
365
+ The proof is deferred to SM B. The achieved results re-
366
+ veal the caveat of deep QCs. Specifically, when UE(x)
367
+ is deep, two encoded states σ(x(i,k)) and σ(x(i′,k)) from
368
+ the same class tend to be orthogonal, which contradicts
369
+ with Conditions (i) in Theorem 1. Besides, when U(θ)
370
+ is deep, the output of QC concentrates to zero, regard-
371
+ less how o(k′) and ρ(i,k) are selected. This violates Con-
372
+ dition (iii) in Theorem 1.
373
+ Overall, over-parameterized
374
+ QCs encounter the high empirical risk and thus the high
375
+ expected risk, which suggests that QCs experience a U-
376
+ shape risk curve. This observation differs the dynamics
377
+ of QCs from variational quantum Eigensolvers, since the
378
+ latter can benefit from over-parameterization, e.g., better
379
+ trainability and convergence rate [87–90]. Moreover, the
380
+ rule of thumb in QCs’ construction is slimming HQ to
381
+ find the valley region. Intriguingly, tailoring the feature
382
+ states echoes with quantum metric learning and quantum
383
+ self-supervised learning [91–95].
384
+ Probe power of QCs via loss dynamics.—The dis-
385
+ tinct tendency of the risk curves between QCs and CCs
386
+ provides a succinct way to recognize the potential quan-
387
+ tum advantages. As shown in Fig. 1(a), given a specific
388
+ data set, the U-shape risk curve of QCs indicates that its
389
+ advantages mostly appear in the valley region. Precisely,
390
+ if the risk values of QC around the basin are lower than
391
+ those of CC, potential merits may exist; otherwise, QC
392
+ is inferior to CC. The proved learning behavior of QCs,
393
+ accompanied with the tight generalization bound, allows
394
+ us to effectively fit its risk curve according to their loss
395
+ dynamics. Specifically, our method contains three steps.
396
+ First, W tuples of {n, Nt, T} are initialized based on The-
397
+ orem 1 so that the collected risk points of QC span the
398
+ basin area with low generalization error. Second, we ex-
399
+ ecute QC and CC under these W hyper-parameter set-
400
+ tings and fit their loss dynamics to attain the risk curve.
401
+ Last, we compare two risk curves and probe potential
402
+ advantages. See SM F for the implementation details.
403
+ Technical analysis.—Theorem 1 is achieved by ana-
404
+ lyzing when RERM(ˆhQ) and RGene(ˆhQ) are (near) zero.
405
+ In the analysis of RERM(ˆhQ), we first consider the most
406
+ general case in which both ρ and o are tunable, where
407
+ ˆhQ ≡ hQ(ρ∗, o∗) with (ρ∗, o∗) = minρ,o L(ρ, o).
408
+ Lemma 1 (Informal). When the regularizer E is consid-
409
+ ered and (ρ∗, o∗) meets the three conditions in Theorem
410
+ 1, the global minimizer leads to RERM(ˆhQ) = C2
411
+ 1/2 with
412
+ C1 depending on the hyper-parameters in E.
413
+ The achieved properties of o∗ can be used as a priori to
414
+ simplify QCs. To this end, the following lemma quantifies
415
+ RERM(ˆhQ) when o is predefined and E = 0, where ˆhQ ≡
416
+ hQ(ρ∗, o) with ρ∗ = minρ L(ρ, o).
417
+ Lemma 2 (Informal). When the predefined {o(k)} are
418
+ mutually orthogonal with each other and the conditions
419
+ in Theorem 1 are satisfied, we have RERM(ˆhQ) = 0.
420
+ The proofs of Lemmas 1 and 2 are given in SM C&D.
421
+ We next analyze RGene(ˆhQ). Prior results cannot be
422
+ used to prove Theorem 1, since such bounds polynomially
423
+ scale with the trainable parameters and become vacuous
424
+ in the over-parameterized regime. To remedy this issue,
425
+ we utilize the concept of algorithmic robustness [70].
426
+ Definition 1 (Robustness). A learning algorithm A is
427
+ (R, ν(·))-robust with R ∈ N and ν(·) : Zn → R, if Z can
428
+ be partitioned into R disjoint sets, denoted by {Cr}R
429
+ r=1,
430
+ such that the following holds for all D ⊂ Zn : ∀s =
431
+ (x(i), y(i)) ∈ D, ∀z = (x, y) ∈ Z, ∀r ∈ [R],
432
+ s, z ∈ Cr ⇒ |l(hAD(x(i)), y(i)) − l(hAD(x), y)| ≤ ν(D).
433
+ Concisely, robustness measures how much the loss value
434
+ can be varied with respect to the input space Z. A higher
435
+ robustness of a classifier admits lower R, ν(·), and RGene
436
+ [70]. The following lemma quantifies the upper bound of
437
+ RGene(ˆhQ) whose proof is given in SM E.
438
+ Lemma 3. Suppose the measure operator is bounded by
439
+ C2 with maxk∈[K] ∥o(k)∥ ≤ C2. Define ϵ as the tolerable
440
+ error. Following notations in Definition 1, the empiri-
441
+ cal QC is (K(28Nge/ϵ)4mNge, 4L1KC2ϵ)-robust, and with
442
+ probability 1 − δ we have
443
+ RGene(ˆhQ) ≤ 4L1KC2ϵ + 5ξ(ˆhQ)
444
+
445
+ |TD|4mNge ln 56KNge
446
+ ϵδ
447
+ n
448
+ ,
449
+ where L1 is the Lipschitz constant of ℓ with respect to
450
+ hQ, ID
451
+ r = {i ∈ [n] : z(i) ∈ Cr}, ξ(ˆh) := maxz∈Z(ℓ(ˆh, z)),
452
+ and TD := {r ∈ [R] : |ID
453
+ r | ≥ 1}.
454
+ The achieved results convey threefold insights. First, our
455
+ bound does not explicitly depend on the number of train-
456
+ able parameters [96]. This unlocks a new way to under-
457
+ stand the generalization ability of QCs, especially for the
458
+ over-parameterized ones. Next, our bound hints that a
459
+ carefully designed UE can enhance performance of QCs
460
+ [53, 97]. Last, RGene(ˆhQ) → 0 requires n ≫ |TD|4mNge.
461
+ Fortunately, a reasonable value of n is sufficient to war-
462
+ rant this condition, because in general m ≤ 2, Nge ∝ |x|,
463
+ and |TD| is continuously decreased from n to K with re-
464
+ spect to the reduced empirical loss.
465
+ III.
466
+ NUMERICAL SIMULATIONS
467
+ We conduct numerical simulations to exhibit that the
468
+ advantages and limitations of QCs on different classifica-
469
+ tion tasks can be interpreted by the derived risk curve
470
+ and feature states. The omitted construction details and
471
+ results are deferred to SM G.
472
+ We first apply QC to accomplish the binary classifica-
473
+ tion on the parity dataset [98–100]. The number of qubits
474
+ is N = 6 and the hardware-efficient Ansatz is adopted
475
+ to realize U(θ).
476
+ The gradient descent method is used
477
+ as the classical optimizer. Two measure operators are
478
+
479
+ 5
480
+ (a)
481
+ (b)
482
+ FIG. 2. Binary classification on the parity dataset. (a)
483
+ The learning performance of QC when the layer number is
484
+ 3.
485
+ The x-axis denotes the epoch numbers.
486
+ Shaded region
487
+ represents variance. The Bloch spheres display the quantum
488
+ feature states at different epochs. (b) The fitted risk curve
489
+ of QC and MLP. The x-axis denotes the number of trainable
490
+ parameters. The label ‘QC-risk’ (‘MLP-risk’) refers to the
491
+ fitted risk curve of QC and MLP. The label ‘QC-res’ (‘MLP-
492
+ res’) refers to the collected results used for fitting the curves.
493
+ o(1) = |0⟩ ⟨0| and o(2) = |1⟩ ⟨1|. The simulation results of
494
+ QC with Nt = 54 are displayed in Fig. 2(a). Particularly,
495
+ the averaged train (test) accuracy steadily grows from
496
+ 44.1% to 100% within 22 epochs, and the corresponding
497
+ loss decreases from 0.26 to 4 × 10−5. The dynamics of
498
+ the feature states {ρ(i,t)} with t ∈ {0, 10, 20, 30, 40} vi-
499
+ sualized by Bloch spheres echo with Lemma 2. Besides,
500
+ QC becomes more robust when we continue the training.
501
+ Although the train (test) accuracy reaches the optimum,
502
+ the loss can be further reduced and suggests a lower risk
503
+ warranted by Theorem 1. We further compare the risk
504
+ curve between QC and multilayer Perceptron (MLP) on
505
+ this dataset. We fit their risk curves following the pro-
506
+ posed method to probe potential quantum merits.
507
+ As
508
+ shown in Fig. 2(b), QC clearly outperforms MLP when
509
+ the trainable parameters ranges from 20 to 140 and the
510
+ valley of the risk curve is around Nt = 70 [101].
511
+ We then apply QC to learn the Fashion-MNIST im-
512
+ age dataset with K = 9 [102]. The employed number of
513
+ qubits is N = 10 and the Pauli-based measure operators
514
+ are employed.
515
+ Convolutional neural networks (CNNs)
516
+ are exploited as the reference. For all classifiers, the num-
517
+ ber of epochs is fixed to be T = 50 and the number of
518
+ trainable parameters Nt ranges from 60 to 9000. Each
519
+ setting is repeated with 3 times.
520
+ As shown in Fig. 3,
521
+ when the layer number is 50 with Nt = 1500, both the
522
+ train and test accuracies of QC are about 50%.
523
+ This
524
+ performance is inferior to CNN under the similar setting.
525
+ To explore whether QC has the potential to outperform
526
+ CNN on this dataset, we compare their risk curves. As
527
+ shown in Fig. 3(b), unlike the parity dataset, QC is evi-
528
+ dently inferior to CNN on Fashion-MNIST dataset.
529
+ (a)
530
+ (b)
531
+ FIG. 3. Multi-class classification on the image dataset
532
+ with K = 9. (a) The learning performance of QC when the
533
+ layer number is 50. (b) The fitted risk curve of QC and CNN.
534
+ All labels have the same meaning with those used in Fig. 2.
535
+ IV.
536
+ DISCUSSIONS AND OUTLOOK
537
+ We understand the potential of diverse QCs in terms of
538
+ the expected risk. Our theoretical findings demonstrate
539
+ that the efficacy of QCs is dependent on the problem at
540
+ hand, which explains the empirical evidence of their supe-
541
+ riority on synthetic and quantum datasets, yet inferiority
542
+ on realistic tasks. With the clear difference between the
543
+ risk curve of QCs and deep neural classifiers, we present a
544
+ concise technique to investigate potential quantum ben-
545
+ efits by fitting their loss dynamics.
546
+ Numerical results
547
+ validate our theoretical results and the effectiveness of
548
+ our method.
549
+ There are several interesting future research directions.
550
+ The U-shape curve of QCs poses two open questions.
551
+ First, can contemporary QCs attain quantum benefits
552
+ on certain classical data when only limited data and re-
553
+ stricted computing resources are available?
554
+ Secondly,
555
+ is it necessary to redesign QCs such as nonlinear QCs
556
+ [103, 104] that can also exhibit a double-descent risk
557
+ curve? Besides, the unearthed connection between the
558
+ conditions towards optimal empirical risk and quantum
559
+ state discrimination opens a new research avenue that
560
+ amplifies the potential of QCs on quantum data aided
561
+ by quantum information theory.
562
+ Finally, it is intrigu-
563
+ ing to extend the developed non-vacuous generalization
564
+ error bound of QCs to other scenarios, such as out-of-
565
+ distribution data, in order to identify potential quantum
566
+ advantages.
567
+ ACKNOWLEDGMENTS
568
+ The authors thank Xinbiao Wang for valuable input
569
+ and inspiring discussions.
570
+
571
+ 0.3
572
+ 1.0
573
+ Loss
574
+ 0.2
575
+ Train
576
+ -0.8
577
+ Loss
578
+ Test
579
+ Acc
580
+ 0.1
581
+ 0.6
582
+ 0.0
583
+ 0.4
584
+ 0
585
+ 10
586
+ 20
587
+ 30
588
+ 4010)
589
+ X[0]
590
+ y
591
+ X[0]
592
+ X[0)
593
+ y
594
+ X[0)
595
+ y
596
+ X
597
+ 10.6
598
+ 0.4
599
+ QC-risk
600
+ K
601
+ S
602
+ QC-res
603
+ R
604
+ 0.2
605
+ MLP-risk
606
+ MLP-res
607
+ 0.0
608
+ 0
609
+ 20
610
+ 40
611
+ 60
612
+ 80
613
+ 100
614
+ 120
615
+ 140
616
+ 1601.00
617
+ QC-risk
618
+ 0.75
619
+ OC-res
620
+ K
621
+ CNN-risk
622
+ 0.50
623
+ R
624
+ CNN-res
625
+ 0.25
626
+ 0.00
627
+ 0
628
+ 1000
629
+ 2000
630
+ 3000
631
+ 4000
632
+ 5000
633
+ 6000
634
+ 7000
635
+ 8000
636
+ 90001.00
637
+ 0.9
638
+ 0.75
639
+ Loss
640
+ SS
641
+ Train
642
+ 0.50 0
643
+ 9 0.6
644
+ Test
645
+ 0.25
646
+ 0.3
647
+ 0.00
648
+ 0
649
+ 10
650
+ 20
651
+ 30
652
+ 40
653
+ 506
654
+ [1] Frank Arute, Kunal Arya, Ryan Babbush, Dave Bacon,
655
+ Joseph C Bardin, Rami Barends, Rupak Biswas, Sergio
656
+ Boixo, Fernando GSL Brandao, David A Buell, et al.
657
+ Quantum supremacy using a programmable supercon-
658
+ ducting processor. Nature, 574(7779):505–510, 2019.
659
+ [2] Han-Sen Zhong, Hui Wang, Yu-Hao Deng, Ming-Cheng
660
+ Chen, Li-Chao Peng, Yi-Han Luo, Jian Qin, Dian Wu,
661
+ Xing Ding, Yi Hu, et al. Quantum computational ad-
662
+ vantage using photons. Science, 370(6523):1460–1463,
663
+ 2020.
664
+ [3] Yulin Wu, Wan-Su Bao, Sirui Cao, Fusheng Chen,
665
+ Ming-Cheng Chen, Xiawei Chen, Tung-Hsun Chung,
666
+ Hui Deng, Yajie Du, Daojin Fan, et al.
667
+ Strong
668
+ quantum computational advantage using a supercon-
669
+ ducting quantum processor.
670
+ Physical review letters,
671
+ 127(18):180501, 2021.
672
+ [4] Xiao Mi, Pedram Roushan, Chris Quintana, Salvatore
673
+ Mandra, Jeffrey Marshall, Charles Neill, Frank Arute,
674
+ Kunal Arya, Juan Atalaya, Ryan Babbush, et al. In-
675
+ formation scrambling in quantum circuits.
676
+ Science,
677
+ 374(6574):1479–1483, 2021.
678
+ [5] Yi Xia, Wei Li, Quntao Zhuang, and Zheshen Zhang.
679
+ Quantum-enhanced data classification with a vari-
680
+ ational entangled sensor network.
681
+ Phys. Rev. X,
682
+ 11:021047, Jun 2021.
683
+ [6] M
684
+ Cerezo,
685
+ Guillaume
686
+ Verdon,
687
+ Hsin-Yuan
688
+ Huang,
689
+ Lukasz Cincio, and Patrick J Coles. Challenges and op-
690
+ portunities in quantum machine learning. Nature Com-
691
+ putational Science, 2(9):567–576, 2022.
692
+ [7] Marcello Benedetti, Erika Lloyd, Stefan Sack, and Mat-
693
+ tia Fiorentini. Parameterized quantum circuits as ma-
694
+ chine learning models. Quantum Science and Technol-
695
+ ogy, 4(4):043001, 2019.
696
+ [8] Marco Cerezo, Andrew Arrasmith, Ryan Babbush, Si-
697
+ mon C Benjamin, Suguru Endo, Keisuke Fujii, Jarrod R
698
+ McClean, Kosuke Mitarai, Xiao Yuan, Lukasz Cincio,
699
+ et al. Variational quantum algorithms. Nature Reviews
700
+ Physics, 3(9):625–644, 2021.
701
+ [9] Xiao Yuan, Suguru Endo, Qi Zhao, Ying Li, and Si-
702
+ mon C Benjamin. Theory of variational quantum sim-
703
+ ulation. Quantum, 3:191, 2019.
704
+ [10] Sam McArdle, Suguru Endo, Al´an Aspuru-Guzik, Si-
705
+ mon C Benjamin, and Xiao Yuan.
706
+ Quantum com-
707
+ putational chemistry.
708
+ Reviews of Modern Physics,
709
+ 92(1):015003, 2020.
710
+ [11] Cristina Cirstoiu, Zoe Holmes, Joseph Iosue, Lukasz
711
+ Cincio, Patrick J Coles, and Andrew Sornborger. Vari-
712
+ ational fast forwarding for quantum simulation beyond
713
+ the coherence time. npj Quantum Information, 6(1):1–
714
+ 10, 2020.
715
+ [12] Google AI Quantum, Collaborators*†, Frank Arute,
716
+ Kunal Arya, Ryan Babbush, Dave Bacon, Joseph C
717
+ Bardin,
718
+ Rami
719
+ Barends,
720
+ Sergio
721
+ Boixo,
722
+ Michael
723
+ Broughton, Bob B Buckley, et al.
724
+ Hartree-fock on
725
+ a superconducting qubit quantum computer. Science,
726
+ 369(6507):1084–1089, 2020.
727
+ [13] Jonathan
728
+ Romero,
729
+ Jonathan
730
+ P
731
+ Olson,
732
+ and
733
+ Alan
734
+ Aspuru-Guzik. Quantum autoencoders for efficient com-
735
+ pression of quantum data. Quantum Science and Tech-
736
+ nology, 2(4):045001, 2017.
737
+ [14] Yuxuan Du and Dacheng Tao. On exploring practical
738
+ potentials of quantum auto-encoder with advantages.
739
+ arXiv preprint arXiv:2106.15432, 2021.
740
+ [15] Marco Cerezo, Alexander Poremba, Lukasz Cincio, and
741
+ Patrick J Coles. Variational quantum fidelity estima-
742
+ tion. Quantum, 4:248, 2020.
743
+ [16] Dmytro Bondarenko and Polina Feldmann. Quantum
744
+ autoencoders to denoise quantum data. Physical review
745
+ letters, 124(13):130502, 2020.
746
+ [17] Edward
747
+ Farhi
748
+ and
749
+ Aram
750
+ W
751
+ Harrow.
752
+ Quantum
753
+ supremacy through the quantum approximate optimiza-
754
+ tion algorithm. arXiv preprint arXiv:1602.07674, 2016.
755
+ [18] Leo Zhou, Sheng-Tao Wang, Soonwon Choi, Hannes
756
+ Pichler, and Mikhail D Lukin. Quantum approximate
757
+ optimization algorithm: Performance, mechanism, and
758
+ implementation on near-term devices. Physical Review
759
+ X, 10(2):021067, 2020.
760
+ [19] Matthew P Harrigan, Kevin J Sung, Matthew Neeley,
761
+ Kevin J Satzinger, Frank Arute, Kunal Arya, Juan Ata-
762
+ laya, Joseph C Bardin, Rami Barends, Sergio Boixo,
763
+ et al. Quantum approximate optimization of non-planar
764
+ graph problems on a planar superconducting processor.
765
+ Nature Physics, 17(3):332–336, 2021.
766
+ [20] Zeqiao Zhou, Yuxuan Du, Xinmei Tian, and Dacheng
767
+ Tao.
768
+ Qaoa-in-qaoa: solving large-scale maxcut prob-
769
+ lems on small quantum machines.
770
+ arXiv preprint
771
+ arXiv:2205.11762, 2022.
772
+ [21] Guido Pagano,
773
+ Aniruddha Bapat,
774
+ Patrick Becker,
775
+ Katherine S Collins, Arinjoy De, Paul W Hess, Har-
776
+ vey B Kaplan, Antonis Kyprianidis, Wen Lin Tan,
777
+ Christopher Baldwin, et al. Quantum approximate op-
778
+ timization of the long-range ising model with a trapped-
779
+ ion quantum simulator.
780
+ Proceedings of the National
781
+ Academy of Sciences, 117(41):25396–25401, 2020.
782
+ [22] Vojtˇech Havl´ıˇcek, Antonio D C´orcoles, Kristan Temme,
783
+ Aram W Harrow, Abhinav Kandala, Jerry M Chow, and
784
+ Jay M Gambetta. Supervised learning with quantum-
785
+ enhanced feature spaces.
786
+ Nature, 567(7747):209–212,
787
+ 2019.
788
+ [23] He-Liang Huang, Yuxuan Du, Ming Gong, Youwei
789
+ Zhao, Yulin Wu, Chaoyue Wang, Shaowei Li, Futian
790
+ Liang, Jin Lin, Yu Xu, et al. Experimental quantum
791
+ generative adversarial networks for image generation.
792
+ Physical Review Applied, 16(2):024051, 2021.
793
+ [24] Jinkai Tian, Xiaoyu Sun, Yuxuan Du, Shanshan Zhao,
794
+ Qing Liu, Kaining Zhang, Wei Yi, Wanrong Huang,
795
+ Chaoyue Wang, Xingyao Wu, et al. Recent advances for
796
+ quantum neural networks in generative learning. arXiv
797
+ preprint arXiv:2206.03066, 2022.
798
+ [25] Xinbiao Wang, Yuxuan Du, Yong Luo, and Dacheng
799
+ Tao. Towards understanding the power of quantum ker-
800
+ nels in the nisq era. Quantum, 5:531, 2021.
801
+ [26] Yuxuan Du, Zhuozhuo Tu, Bujiao Wu, Xiao Yuan, and
802
+ Dacheng Tao.
803
+ Theory of quantum generative learn-
804
+ ing models with maximum mean discrepancy.
805
+ arXiv
806
+ preprint arXiv:2205.04730, 2022.
807
+ [27] Maria Schuld and Nathan Killoran. Quantum machine
808
+ learning in feature hilbert spaces. Physical review let-
809
+ ters, 122(4):040504, 2019.
810
+ [28] Kosuke Mitarai, Makoto Negoro, Masahiro Kitagawa,
811
+ and Keisuke Fujii.
812
+ Quantum circuit learning.
813
+ arXiv
814
+ preprint arXiv:1803.00745, 2018.
815
+
816
+ 7
817
+ [29] Maria Schuld, Alex Bocharov, Krysta M Svore, and
818
+ Nathan Wiebe.
819
+ Circuit-centric quantum classifiers.
820
+ Physical Review A, 101(3):032308, 2020.
821
+ [30] Guangxi Li, Zhixin Song, and Xin Wang. Vsql: Vari-
822
+ ational shadow quantum learning for classification. In
823
+ Proceedings of the AAAI Conference on Artificial Intel-
824
+ ligence, volume 35, pages 8357–8365, 2021.
825
+ [31] Adri´an P´erez-Salinas, Alba Cervera-Lierta, Elies Gil-
826
+ Fuster, and Jos´e I Latorre.
827
+ Data re-uploading for a
828
+ universal quantum classifier. Quantum, 4:226, 2020.
829
+ [32] Weikang Li and Dong-Ling Deng. Recent advances for
830
+ quantum classifiers. Science China Physics, Mechanics
831
+ & Astronomy, 65(2):1–23, 2022.
832
+ [33] Yuxuan Du,
833
+ Min-Hsiu Hsieh,
834
+ Tongliang Liu,
835
+ and
836
+ Dacheng Tao.
837
+ A grover-search based quantum learn-
838
+ ing scheme for classification. New Journal of Physics,
839
+ 23(2):023020, 2021.
840
+ [34] Samuel Yen-Chi Chen, Chih-Min Huang, Chia-Wei Hs-
841
+ ing, and Ying-Jer Kao. An end-to-end trainable hybrid
842
+ classical-quantum classifier. Machine Learning: Science
843
+ and Technology, 2(4):045021, 2021.
844
+ [35] Evan Peters,
845
+ Jo˜ao Caldeira,
846
+ Alan Ho,
847
+ Stefan Le-
848
+ ichenauer, Masoud Mohseni, Hartmut Neven, Panagi-
849
+ otis Spentzouris, Doug Strain, and Gabriel N Perdue.
850
+ Machine learning of high dimensional data on a noisy
851
+ quantum processor. npj Quantum Information, 7(1):1–
852
+ 5, 2021.
853
+ [36] Iris Cong, Soonwon Choi, and Mikhail D Lukin. Quan-
854
+ tum convolutional neural networks.
855
+ Nature Physics,
856
+ 15(12):1273–1278, 2019.
857
+ [37] Ming Gong, He-Liang Huang, Shiyu Wang, Chu Guo,
858
+ Shaowei Li, Yulin Wu, Qingling Zhu, Youwei Zhao,
859
+ Shaojun Guo, Haoran Qian, et al. Quantum neuronal
860
+ sensing of quantum many-body states on a 61-qubit pro-
861
+ grammable superconducting processor. arXiv preprint
862
+ arXiv:2201.05957, 2022.
863
+ [38] Johannes Herrmann, Sergi Masot Llima, Ants Remm,
864
+ Petr Zapletal, Nathan A McMahon, Colin Scarato,
865
+ Fran¸cois
866
+ Swiadek,
867
+ Christian
868
+ Kraglund
869
+ Andersen,
870
+ Christoph Hellings, Sebastian Krinner, et al.
871
+ Realiz-
872
+ ing quantum convolutional neural networks on a su-
873
+ perconducting quantum processor to recognize quantum
874
+ phases. Nature Communications, 13(1):1–7, 2022.
875
+ [39] Huili Zhang, Si Jiang, Xin Wang, Wengang Zhang, Xi-
876
+ anzhi Huang, Xiaolong Ouyang, Yefei Yu, Yanqing Liu,
877
+ Dong-Ling Deng, and L-M Duan. Experimental demon-
878
+ stration of adversarial examples in learning topological
879
+ phases. Nature communications, 13(1):1–8, 2022.
880
+ [40] Edward Grant, Marcello Benedetti, Shuxiang Cao, An-
881
+ drew Hallam, Joshua Lockhart, Vid Stojevic, Andrew G
882
+ Green, and Simone Severini. Hierarchical quantum clas-
883
+ sifiers. npj Quantum Information, 4(1):1–8, 2018.
884
+ [41] Xu-Fei Yin, Yuxuan Du, Yue-Yang Fei, Rui Zhang,
885
+ Li-Zheng Liu, Yingqiu Mao, Tongliang Liu, Min-Hsiu
886
+ Hsieh, Li Li, Nai-Le Liu, et al. Efficient bipartite entan-
887
+ glement detection scheme with a quantum adversarial
888
+ solver. Physical Review Letters, 128(11):110501, 2022.
889
+ [42] Amira Abbas, David Sutter, Christa Zoufal, Aur´elien
890
+ Lucchi, Alessio Figalli, and Stefan Woerner. The power
891
+ of quantum neural networks. Nature Computational Sci-
892
+ ence, 1(6):403–409, 2021.
893
+ [43] Yuxuan Du, Min-Hsiu Hsieh, Tongliang Liu, Shan You,
894
+ and Dacheng Tao. Learnability of quantum neural net-
895
+ works. PRX Quantum, 2(4):040337, 2021.
896
+ [44] Hsin-Yuan
897
+ Huang,
898
+ Michael
899
+ Broughton,
900
+ Masoud
901
+ Mohseni,
902
+ Ryan
903
+ Babbush,
904
+ Sergio
905
+ Boixo,
906
+ Hartmut
907
+ Neven, and Jarrod R McClean.
908
+ Power of data in
909
+ quantum machine learning.
910
+ Nature communications,
911
+ 12(1):1–9, 2021.
912
+ [45] Yuxuan Du,
913
+ Min-Hsiu Hsieh,
914
+ Tongliang Liu,
915
+ and
916
+ Dacheng Tao. Expressive power of parametrized quan-
917
+ tum circuits. Phys. Rev. Research, 2:033125, Jul 2020.
918
+ [46] Tobias Haug, Kishor Bharti, and MS Kim. Capacity and
919
+ quantum geometry of parametrized quantum circuits.
920
+ PRX Quantum, 2(4):040309, 2021.
921
+ [47] Huitao Shen, Pengfei Zhang, Yi-Zhuang You, and Hui
922
+ Zhai. Information scrambling in quantum neural net-
923
+ works. Physical Review Letters, 124(20):200504, 2020.
924
+ [48] Yadong Wu, Juan Yao, Pengfei Zhang, and Hui Zhai.
925
+ Expressivity of quantum neural networks. Physical Re-
926
+ view Research, 3(3):L032049, 2021.
927
+ [49] Eric R Anschuetz and Bobak T Kiani. Quantum varia-
928
+ tional algorithms are swamped with traps. Nature Com-
929
+ munications, 13(1):1–10, 2022.
930
+ [50] Norihito Shirai, Kenji Kubo, Kosuke Mitarai, and
931
+ Keisuke Fujii. Quantum tangent kernel. arXiv preprint
932
+ arXiv:2111.02951, 2021.
933
+ [51] Zo¨e
934
+ Holmes,
935
+ Kunal
936
+ Sharma,
937
+ Marco
938
+ Cerezo,
939
+ and
940
+ Patrick J Coles. Connecting ansatz expressibility to gra-
941
+ dient magnitudes and barren plateaus. PRX Quantum,
942
+ 3(1):010313, 2022.
943
+ [52] Leonardo Banchi, Jason Pereira, and Stefano Piran-
944
+ dola.
945
+ Generalization in quantum machine learning:
946
+ A quantum information standpoint.
947
+ PRX Quantum,
948
+ 2(4):040321, 2021.
949
+ [53] Matthias C Caro, Elies Gil-Fuster, Johannes Jakob
950
+ Meyer,
951
+ Jens Eisert,
952
+ and Ryan Sweke.
953
+ Encoding-
954
+ dependent
955
+ generalization
956
+ bounds
957
+ for
958
+ parametrized
959
+ quantum circuits. Quantum, 5:582, 2021.
960
+ [54] Matthias C Caro, Hsin-Yuan Huang, M Cerezo, Ku-
961
+ nal Sharma, Andrew Sornborger, Lukasz Cincio, and
962
+ Patrick J Coles.
963
+ Generalization in quantum ma-
964
+ chine learning from few training data. arXiv preprint
965
+ arXiv:2111.05292, 2021.
966
+ [55] Yuxuan Du, Zhuozhuo Tu, Xiao Yuan, and Dacheng
967
+ Tao.
968
+ Efficient measure for the expressivity of varia-
969
+ tional quantum algorithms.
970
+ Physical Review Letters,
971
+ 128(8):080506, 2022.
972
+ [56] Casper Gyurik, Dyon van Vreumingen, and Vedran
973
+ Dunjko. Structural risk minimization for quantum lin-
974
+ ear classifiers. arXiv preprint arXiv:2105.05566, 2021.
975
+ [57] Hsin-Yuan Huang, Richard Kueng, and John Preskill.
976
+ Information-theoretic
977
+ bounds
978
+ on
979
+ quantum
980
+ advan-
981
+ tage in machine learning.
982
+ Physical Review Letters,
983
+ 126(19):190505, 2021.
984
+ [58] Hsin-Yuan Huang, Michael Broughton, Jordan Cotler,
985
+ Sitan Chen, Jerry Li, Masoud Mohseni, Hartmut Neven,
986
+ Ryan Babbush, Richard Kueng, John Preskill, et al.
987
+ Quantum advantage in learning from experiments. Sci-
988
+ ence, 376(6598):1182–1186, 2022.
989
+ [59] Carlo Ciliberto, Andrea Rocchetto, Alessandro Rudi,
990
+ and Leonard Wossnig. Statistical limits of supervised
991
+ quantum learning. Physical Review A, 102(4):042414,
992
+ 2020.
993
+ [60] Jonas Landman, Slimane Thabet, Constantin Dalyac,
994
+ Hela Mhiri, and Elham Kashefi.
995
+ Classically Approx-
996
+ imating Variational Quantum Machine Learning with
997
+ Random Fourier Features, 2022. arXiv:2210.13200v1.
998
+
999
+ 8
1000
+ [61] Giacomo De Palma, Milad Marvian, Cambyse Rouz´e,
1001
+ and Daniel Stilck Fran¸ca.
1002
+ Limitations of variational
1003
+ quantum algorithms: a quantum optimal transport ap-
1004
+ proach. arXiv preprint arXiv:2204.03455, 2022.
1005
+ [62] Franz J Schreiber, Jens Eisert, and Johannes Jakob
1006
+ Meyer. Classical surrogates for quantum learning mod-
1007
+ els. arXiv preprint arXiv:2206.11740, 2022.
1008
+ [63] Yunchao Liu, Srinivasan Arunachalam, and Kristan
1009
+ Temme. A rigorous and robust quantum speed-up in su-
1010
+ pervised machine learning. Nature Physics, 17(9):1013–
1011
+ 1017, 2021.
1012
+ [64] Yang Qian, Xinbiao Wang, Yuxuan Du, Xingyao Wu,
1013
+ and Dacheng Tao. The dilemma of quantum neural net-
1014
+ works. arXiv preprint arXiv:2106.04975, 2021.
1015
+ [65] Mehryar Mohri, Afshin Rostamizadeh, and Ameet Tal-
1016
+ walkar. Foundations of machine learning. MIT press,
1017
+ 2018.
1018
+ [66] Preetum Nakkiran, Gal Kaplun, Yamini Bansal, Tristan
1019
+ Yang, Boaz Barak, and Ilya Sutskever.
1020
+ Deep double
1021
+ descent: Where bigger models and more data hurt. In
1022
+ International Conference on Learning Representations,
1023
+ 2020.
1024
+ [67] Mikhail Belkin, Daniel Hsu, Siyuan Ma, and Soumik
1025
+ Mandal. Reconciling modern machine-learning practice
1026
+ and the classical bias–variance trade-off.
1027
+ Proceedings
1028
+ of the National Academy of Sciences, 116(32):15849–
1029
+ 15854, 2019.
1030
+ [68] Michael J Bremner, Caterina Mora, and Andreas Win-
1031
+ ter. Are random pure states useful for quantum compu-
1032
+ tation? Physical review letters, 102(19):190502, 2009.
1033
+ [69] David Gross,
1034
+ Steve T Flammia,
1035
+ and Jens Eisert.
1036
+ Most quantum states are too entangled to be useful
1037
+ as computational resources.
1038
+ Physical review letters,
1039
+ 102(19):190501, 2009.
1040
+ [70] Huan Xu and Shie Mannor. Robustness and general-
1041
+ ization. In Adam Tauman Kalai and Mehryar Mohri,
1042
+ editors, COLT 2010 - The 23rd Conference on Learning
1043
+ Theory, Haifa, Israel, June 27-29, 2010, pages 503–515.
1044
+ Omnipress, 2010.
1045
+ [71] Christopher M Bishop and Nasser M Nasrabadi. Pattern
1046
+ recognition and machine learning, volume 4. Springer,
1047
+ 2006.
1048
+ [72] Vardan Papyan, XY Han, and David L Donoho. Preva-
1049
+ lence of neural collapse during the terminal phase of
1050
+ deep learning training.
1051
+ Proceedings of the National
1052
+ Academy of Sciences, 117(40):24652–24663, 2020.
1053
+ [73] Yibo Yang, Shixiang Chen, Xiangtai Li, Liang Xie,
1054
+ Zhouchen Lin, and Dacheng Tao. Inducing neural col-
1055
+ lapse in imbalanced learning: Do we really need a learn-
1056
+ able classifier at the end of deep neural network?
1057
+ In
1058
+ NeurIPS, 2022.
1059
+ [74] Joonwoo Bae and Leong-Chuan Kwek. Quantum state
1060
+ discrimination and its applications. Journal of Physics
1061
+ A: Mathematical and Theoretical, 48(8):083001, 2015.
1062
+ [75] for any two varied classes, o(k) and o(k′) classify ¯ρ∗(k)
1063
+ and ¯ρ∗(k′) with probability 1.
1064
+ [76] Bingzhi Zhang and Quntao Zhuang. Fast decay of classi-
1065
+ fication error in variational quantum circuits. Quantum
1066
+ Science and Technology, 2022.
1067
+ [77] Note that Conditions (i)&(ii) imply that {¯ρ∗(k)} forms
1068
+ an orthogonal frame. Since any orthogonal frame can
1069
+ trivially be turned into a simplex ETF by reducing its
1070
+ global mean, we argue that {¯ρ∗(k)} composes ETF.
1071
+ [78] Joseph M Renes, Robin Blume-Kohout, Andrew J
1072
+ Scott, and Carlton M Caves. Symmetric information-
1073
+ ally complete quantum measurements. Journal of Math-
1074
+ ematical Physics, 45(6):2171–2180, 2004.
1075
+ [79] Andrew J Scott. Tight informationally complete quan-
1076
+ tum measurements. Journal of Physics A: Mathematical
1077
+ and General, 39(43):13507, 2006.
1078
+ [80] Guillermo
1079
+ Garc´ıa-P´erez,
1080
+ Matteo
1081
+ AC
1082
+ Rossi,
1083
+ Boris
1084
+ Sokolov, Francesco Tacchino, Panagiotis Kl Barkoutsos,
1085
+ Guglielmo Mazzola, Ivano Tavernelli, and Sabrina Man-
1086
+ iscalco. Learning to measure: Adaptive informationally
1087
+ complete generalized measurements for quantum algo-
1088
+ rithms. Prx quantum, 2(4):040342, 2021.
1089
+ [81] Joel A Tropp. Complex equiangular tight frames. In
1090
+ Wavelets XI, volume 5914, page 591401. SPIE, 2005.
1091
+ [82] M´aty´as A Sustik, Joel A Tropp, Inderjit S Dhillon, and
1092
+ Robert W Heath Jr. On the existence of equiangular
1093
+ tight frames. Linear Algebra and its applications, 426(2-
1094
+ 3):619–635, 2007.
1095
+ [83] Marco Cerezo, Akira Sone, Tyler Volkoff, Lukasz Cin-
1096
+ cio, and Patrick J Coles. Cost function dependent bar-
1097
+ ren plateaus in shallow parametrized quantum circuits.
1098
+ Nature communications, 12(1):1–12, 2021.
1099
+ [84] Stefan H Sack, Raimel A Medina, Alexios A Michai-
1100
+ lidis, Richard Kueng, and Maksym Serbyn. Avoiding
1101
+ barren plateaus using classical shadows.
1102
+ PRX Quan-
1103
+ tum, 3(2):020365, 2022.
1104
+ [85] Hsin-Yuan Huang, Richard Kueng, and John Preskill.
1105
+ Predicting many properties of a quantum system from
1106
+ very few measurements. Nature Physics, 16(10):1050–
1107
+ 1057, 2020.
1108
+ [86] Hsin-Yuan Huang. Learning quantum states from their
1109
+ classical shadows. Nature Reviews Physics, 4(2):81–81,
1110
+ 2022.
1111
+ [87] Junyu Liu, Zexi Lin, and Liang Jiang. Laziness, barren
1112
+ plateau, and noise in machine learning. arXiv preprint
1113
+ arXiv:2206.09313, 2022.
1114
+ [88] Junyu Liu, Khadijeh Najafi, Kunal Sharma, Francesco
1115
+ Tacchino, Liang Jiang, and Antonio Mezzacapo. An an-
1116
+ alytic theory for the dynamics of wide quantum neural
1117
+ networks. arXiv preprint arXiv:2203.16711, 2022.
1118
+ [89] Xinbiao Wang, Junyu Liu, Tongliang Liu, Yong Luo,
1119
+ Yuxuan Du, and Dacheng Tao.
1120
+ Symmetric prun-
1121
+ ing in quantum neural networks.
1122
+ arXiv preprint
1123
+ arXiv:2208.14057, 2022.
1124
+ [90] Xuchen
1125
+ You,
1126
+ Shouvanik
1127
+ Chakrabarti,
1128
+ and
1129
+ Xiaodi
1130
+ Wu.
1131
+ A convergence theory for over-parameterized
1132
+ variational
1133
+ quantum
1134
+ eigensolvers.
1135
+ arXiv
1136
+ preprint
1137
+ arXiv:2205.12481, 2022.
1138
+ [91] Seth Lloyd, Maria Schuld, Aroosa Ijaz, Josh Izaac, and
1139
+ Nathan Killoran.
1140
+ Quantum embeddings for machine
1141
+ learning. arXiv preprint arXiv:2001.03622, 2020.
1142
+ [92] Nhat A Nghiem, Samuel Yen-Chi Chen, and Tzu-Chieh
1143
+ Wei.
1144
+ Unified framework for quantum classification.
1145
+ Physical Review Research, 3(3):033056, 2021.
1146
+ [93] Ryan LaRose and Brian Coyle.
1147
+ Robust data en-
1148
+ codings for quantum classifiers.
1149
+ Physical Review A,
1150
+ 102(3):032420, 2020.
1151
+ [94] Ben Jaderberg, Lewis W Anderson, Weidi Xie, Samuel
1152
+ Albanie, Martin Kiffner, and Dieter Jaksch. Quantum
1153
+ self-supervised learning. Quantum Science and Technol-
1154
+ ogy, 7(3):035005, 2022.
1155
+ [95] Rui Yang, Samuel Bosch, Bobak Kiani, Seth Lloyd, and
1156
+ Adrian Lupascu. An analog quantum variational em-
1157
+
1158
+ 9
1159
+ bedding classifier, 2022. arXiv:2211.02748v1.
1160
+ [96] The exact form of the first term in the generalization
1161
+ bound should be 4L1KC2f(U(θ))ϵ with f(U(θ)) ≤ 1
1162
+ for any Ansatz. Therefore, for simplicity, we discard the
1163
+ term f(U(θ)).
1164
+ [97] Masahito Hayashi and Yuxiang Yang.
1165
+ Efficient al-
1166
+ gorithms for quantum information bottleneck.
1167
+ arXiv
1168
+ preprint arXiv:2208.10342, 2022.
1169
+ [98] Andrew W Cross, Graeme Smith, and John A Smolin.
1170
+ Quantum learning robust against noise. Physical Review
1171
+ A, 92(1):012327, 2015.
1172
+ [99] Diego Rist`e, Marcus P Da Silva, Colm A Ryan, An-
1173
+ drew W Cross, Antonio D C´orcoles, John A Smolin,
1174
+ Jay M Gambetta, Jerry M Chow, and Blake R John-
1175
+ son. Demonstration of quantum advantage in machine
1176
+ learning. npj Quantum Information, 3(1):1–5, 2017.
1177
+ [100] Pinaki Sen, Amandeep Singh Bhatia, Kamalpreet Singh
1178
+ Bhangu, and Ahmed Elbeltagi.
1179
+ Variational quantum
1180
+ classifiers through the lens of the hessian.
1181
+ Plos one,
1182
+ 17(1):e0262346, 2022.
1183
+ [101] The disappeared double-descent curve of MLP is caused
1184
+ by the limited train data. In other words,
1185
+ over-
1186
+ parameterization and sufficient train data are two nec-
1187
+ essary conditions to induce the double-descent curve,
1188
+ while parity dataset can only provide limited train data.
1189
+ [102] Han Xiao, Kashif Rasul, and Roland Vollgraf. Fashion-
1190
+ mnist: a novel image dataset for benchmarking machine
1191
+ learning algorithms, 2017.
1192
+ [103] Maria Schuld and Nathan Killoran. Is quantum advan-
1193
+ tage the right goal for quantum machine learning? PRX
1194
+ Quantum, 3:030101, Jul 2022.
1195
+ [104] Zo¨e Holmes, Nolan Coble, Andrew T Sornborger, and
1196
+ Yi˘git Suba¸sı. On nonlinear transformations in quantum
1197
+ computation. arXiv preprint arXiv:2112.12307, 2021.
1198
+ [105] Kenji Kawaguchi, Zhun Deng, Kyle Luh, and Jiaoyang
1199
+ Huang.
1200
+ Robustness implies generalization via data-
1201
+ dependent generalization bounds.
1202
+ In International
1203
+ Conference on Machine Learning, pages 10866–10894.
1204
+ PMLR, 2022.
1205
+ [106] Thomas Barthel and Jianfeng Lu. Fundamental limita-
1206
+ tions for measurements in quantum many-body systems.
1207
+ Phys. Rev. Lett., 121:080406, Aug 2018.
1208
+ [107] Amit Daniely and Eran Malach. Learning parities with
1209
+ neural networks. Advances in Neural Information Pro-
1210
+ cessing Systems, 33:20356–20365, 2020.
1211
+ [108] Boaz Barak, Benjamin L Edelman, Surbhi Goel, Sham
1212
+ Kakade, Eran Malach, and Cyril Zhang.
1213
+ Hidden
1214
+ progress in deep learning: Sgd learns parities near the
1215
+ computational limit. arXiv preprint arXiv:2207.08799,
1216
+ 2022.
1217
+ [109] Ian Goodfellow, Yoshua Bengio, and Aaron Courville.
1218
+ Deep learning. MIT press, 2016.
1219
+ [110] John Duchi, Elad Hazan, and Yoram Singer. Adaptive
1220
+ subgradient methods for online learning and stochas-
1221
+ tic optimization. Journal of machine learning research,
1222
+ 12(7), 2011.
1223
+
1224
+ 10
1225
+ The organization of the supplementary material (SM) is as follows. In SM A, we present the proof of Theorem 1. Then, we provide the proof of Corollary 1 in SM B. Subsequently, we present the proofs of Lemma 1 and Lemma 2 in SM C and SM D, respectively. Next, in SM E, we present the proof of Lemma 3. SM F describes the implementation of the algorithm used to probe potential advantages of QCs. Finally, we elucidate the details of the numerical simulations in SM G.
+
+ SM A: Proof of Theorem 1
+
+ For convenience, let us first recall the settings and notations introduced in the main text. When QCs are applied to accomplish the multi-class classification task, the training dataset $\mathcal{D}$ contains $n$ examples, and the number of examples in each class is the same, with $n = n_c K$. Moreover, the per-sample loss is specified as the mean square error.
+ We next introduce the formal description of Theorem 1. In particular, Theorem 1 is established on Lemma 2, where the regularization term is set to zero (i.e., $\mathsf{E} = 0$) and the set of measure operators is predefined, i.e., $o$ spans the space $\mathbb{C}^{2^D \times 2^D}$ and satisfies $\operatorname{Tr}(o^{(k)} o^{(k')}) = B\delta_{k,k'}$, where $B \ge 1$ is a constant. These requirements on $o$ aim to preserve Condition (iii) in Lemma 1. Note that the focus on these specific settings instead of the most general settings (i.e., $o$ is tunable and $\mathsf{E}$ is nonzero) is motivated by Lemma 1, which promises a lower expected risk.
+ Following the above elaboration, the loss function of QC to be minimized can be explicitly written as
+ $$\mathcal{L}(\rho) = \frac{1}{2n}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\Big\|\big[\operatorname{Tr}(\rho^{(i,k)} o^{(j)})\big]_{j=1:K} - \boldsymbol{y}^{(i,k)}\Big\|^2, \qquad (A1)$$
+ where $\boldsymbol{y}^{(i,k)}$ is the unit basis vector whose $k$-th entry is 1, for $\forall i \in [n_c]$, $\forall k \in [K]$. Denote $\rho^* = \arg\min_\rho \mathcal{L}(\rho)$ and the empirical risk of QC as $R_{\rm ERM}(\hat{h}_Q)$ with $\hat{h}_Q \equiv \hat{h}_Q(\rho^*)$. The formal statement of Theorem 1 is as follows.
+ Theorem (Formal statement of Theorem 1). Following the notations in Lemmas 2 and 3, with probability $1-\delta$, the expected risk of QC tends to zero, i.e., $R(\hat{h}_Q) \to 0$, when the size of the train dataset satisfies $n \gg O(K N_{ge} \log\frac{K N_g}{\epsilon\delta})$ and the global minimizer $\rho^*$ of Eq. (A1) satisfies
+ $$\text{(i)}\ \bar{\rho}^{*(k)} := \rho^{*(1,k)} = \cdots = \rho^{*(n_c,k)}; \quad \text{(ii)}\ \operatorname{Tr}(\bar{\rho}^{*(k)}\bar{\rho}^{*(k')}) = \frac{1}{B}\delta_{k,k'}; \quad \text{(iii)}\ \operatorname{Tr}(\bar{\rho}^{*(k)} o^{(k')}) = \delta_{k,k'}. \qquad (A2)$$
+ Proof of Theorem 1. Following Eq. (2) and the results in Lemma 3, with probability $1-\delta$, the expected risk of an optimal empirical QC is upper bounded by
+ $$R(\hat{h}_Q) \le R_{\rm ERM}(\hat{h}_Q) + 4L_1 K C_2 \epsilon + 3\xi(\hat{h})\sqrt{\frac{|T_{\mathcal{D}}|\, 4^m N_{ge} \ln(56 K N_{ge}/(\epsilon\delta))}{n}} + \xi(\hat{h})\,\frac{2|T_{\mathcal{D}}|\, 4^m N_{ge} \ln(56 K N_{ge}/(\epsilon\delta))}{n}. \qquad (A3)$$
+ Then, when $\rho^*$ satisfies Eq. (A2), Lemma 2 warrants $R_{\rm ERM}(\hat{h}_Q) = 0$, which gives
+ $$R(\hat{h}_Q) \le 4L_1 K C_2 \epsilon + 3\xi(\hat{h})\sqrt{\frac{|T_{\mathcal{D}}|\, 4^m N_{ge} \ln(56 K N_{ge}/(\epsilon\delta))}{n}} + \xi(\hat{h})\,\frac{2|T_{\mathcal{D}}|\, 4^m N_{ge} \ln(56 K N_{ge}/(\epsilon\delta))}{n}. \qquad (A4)$$
+ This bound can be further simplified when the training of QC is perfect. Note that Condition (i) implies $|T_{\mathcal{D}}| = K$, since all feature states from the same class collapse to the same point. Meanwhile, since $\xi(\hat{h})$ and $C_2$ are bounded, and $m$ and $\epsilon$ are small constants, we can conclude that when $n \gg O(K N_{ge} \log(K N_g/(\epsilon\delta)))$, the expected risk approaches zero.
+ SM B: Proof of Corollary 1
+
+ The proof leverages the following two lemmas related to the Haar measure and unitary $t$-designs.
+ Lemma 4. Let $\{W_y\}_{y\in Y} \subset \mathcal{U}(d)$ form a unitary $t$-design with $t > 1$, and let $A, B: \mathcal{H}_d \to \mathcal{H}_d$ be arbitrary linear operators. Then
+ $$\frac{1}{|Y|}\sum_{y\in Y} \operatorname{Tr}[W_y A W_y^\dagger B] = \int_{\rm Haar} d\mu(W)\, \operatorname{Tr}[W A W^\dagger B] = \frac{\operatorname{Tr}[A]\operatorname{Tr}[B]}{d}. \qquad (B1)$$
+ Lemma 5. Let $\{W_y\}_{y\in Y} \subset \mathcal{U}(d)$ form a unitary $t$-design with $t > 1$, and let $A, B, C, D: \mathcal{H}_d \to \mathcal{H}_d$ be arbitrary linear operators. Then
+ $$\frac{1}{|Y|}\sum_{y\in Y} \operatorname{Tr}[W_y A W_y^\dagger B]\operatorname{Tr}[W_y C W_y^\dagger D] = \int_{\rm Haar} d\mu(W)\, \operatorname{Tr}[W A W^\dagger B]\operatorname{Tr}[W C W^\dagger D]$$
+ $$= \frac{1}{d^2-1}\big(\operatorname{Tr}[A]\operatorname{Tr}[B]\operatorname{Tr}[C]\operatorname{Tr}[D] + \operatorname{Tr}[AC]\operatorname{Tr}[BD]\big) - \frac{1}{d(d^2-1)}\big(\operatorname{Tr}[AC]\operatorname{Tr}[B]\operatorname{Tr}[D] + \operatorname{Tr}[A]\operatorname{Tr}[C]\operatorname{Tr}[BD]\big). \qquad (B2)$$
+ Corollary (Restatement of Corollary 1). Following the notations in Lemmas 2 and 3, when the encoding unitary $\{U_E(\boldsymbol{x})\,|\,\boldsymbol{x}\in\mathcal{X}\}$ follows the Haar distribution, with probability $1-\delta$ the empirical QC obeys $\big|\operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)})\sigma(\boldsymbol{x})\big) - \frac{1}{2^N}\big| \le \sqrt{\frac{3}{2^{2N}\delta}}$. When the adopted Ansatz $\{U(\boldsymbol{\theta})\,|\,\boldsymbol{\theta}\in\Theta\}$ follows the Haar distribution, with probability $1-\delta$ the empirical QC obeys $\big|\operatorname{Tr}(\rho^{(i,k)} o^{(k')}) - \frac{\operatorname{Tr}(o^{(k')})}{2^D}\big| < \sqrt{\frac{\operatorname{Tr}(o^{(k')})^2 + 2\operatorname{Tr}((o^{(k')})^2)}{2^{2D}\delta}}$.
+ Proof of Corollary 1. We complete the proof by separately analyzing the concentration behavior of the encoding unitary and the Ansätze.
+ Concentration of the encoding unitary. Recall that Condition (iii) in Lemma 2 concerns the distance between two feature states $\rho^{(i,k)}$ and $\rho^{(i',k')}$ for $\forall i, i' \in [n_c]$ and $\forall k, k' \in [K]$. In this regard, we quantify the distance between the encoded state $\sigma(\boldsymbol{x}^{(i,k)})$ and $\sigma(\boldsymbol{x})$ with $\boldsymbol{x}\sim\mathcal{X}$ when the deep encoding Ansatz $U_E$ is employed. In particular, we have
+ $$\mathbb{E}_{\boldsymbol{x}\sim\mathcal{X}}\big[\operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)})\sigma(\boldsymbol{x})\big)\big] = \mathbb{E}_{\boldsymbol{x}\sim\mathcal{X}}\big[\operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)}) U_E(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N} U_E(\boldsymbol{x})^\dagger\big)\big] = \int_{\rm Haar} d\mu(U)\, \operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)}) U (|0\rangle\langle 0|)^{\otimes N} U^\dagger\big) = \frac{\operatorname{Tr}(\sigma(\boldsymbol{x}^{(i,k)}))\operatorname{Tr}((|0\rangle\langle 0|)^{\otimes N})}{2^N} = \frac{1}{2^N}, \qquad (B3)$$
+ where the third equality uses Lemma 4. Moreover, the variance of the term $\operatorname{Tr}(\sigma(\boldsymbol{x}^{(i,k)})\sigma(\boldsymbol{x}))$ yields
+ $$\operatorname{Var}_{\boldsymbol{x}\sim\mathcal{X}}\big[\operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)})\sigma(\boldsymbol{x})\big)\big] = \mathbb{E}_{\boldsymbol{x}\sim\mathcal{X}}\big[\operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)})\sigma(\boldsymbol{x})\big)^2\big] - \mathbb{E}_{\boldsymbol{x}\sim\mathcal{X}}\big[\operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)})\sigma(\boldsymbol{x})\big)\big]^2$$
+ $$= \int_{\rm Haar} d\mu(U)\, \operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)}) U (|0\rangle\langle 0|)^{\otimes N} U^\dagger\big)^2 - \frac{1}{2^{2N}}$$
+ $$= \frac{1}{2^{2N}-1}\big(1 + \operatorname{Tr}(\sigma(\boldsymbol{x}^{(i,k)})^2)\big) - \frac{1}{2^{2N}(2^{2N}-1)}\big(\operatorname{Tr}(\sigma(\boldsymbol{x}^{(i,k)})^2) + 1\big) - \frac{1}{2^{2N}} \le \frac{1}{2^{2N-2}} - \frac{1}{2^{2N}} = \frac{3}{2^{2N}}, \qquad (B4)$$
+ where the second equality uses the property that the deep encoding unitary follows the Haar distribution together with the result in Eq. (B3), the third equality comes from Lemma 5, the inequality adopts $\operatorname{Tr}(\sigma^2)\le 1$ and $2^{2N}-1 > 2^{2N-1}$, and the last equality is obtained via simplification.
+ Supported by Chebyshev's inequality, $\Pr(|X - \mathbb{E}[X]| \ge a) \le \operatorname{Var}[X]/a^2$, Eqs. (B3) and (B4) indicate
+ $$\Pr\Big(\Big|\operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)})\sigma(\boldsymbol{x})\big) - \frac{1}{2^N}\Big| \ge \tau\Big) \le \frac{3}{2^{2N}\tau^2}.$$
+ Equivalently, with probability $1-\delta$, we have
+ $$\Big|\operatorname{Tr}\big(\sigma(\boldsymbol{x}^{(i,k)})\sigma(\boldsymbol{x})\big) - \frac{1}{2^N}\Big| \le \sqrt{\frac{3}{2^{2N}\delta}}. \qquad (B5)$$
+ Concentration of the deep Ansätze. Recall Condition (ii) in Lemma 2. Given a feature state $\rho^{(i,k)}$, for $\forall i\in[n_c]$, $\forall k\in[K]$, and a measure operator $o^{(k)}$, the optimal feature state should satisfy
+ $$\operatorname{Tr}(\rho^{*(i,k)} o^{(k')}) = \delta_{k,k'}.$$
+ In other words, we should examine the value of $\operatorname{Tr}(\rho^{(i,k)} o^{(k')})$ when $\rho^{(i,k)}$ is prepared by a deep Ansatz $U(\boldsymbol{\theta})$. Specifically, we have
+ $$\mathbb{E}_{\boldsymbol{\theta}\sim\Theta}\big[\operatorname{Tr}(\rho^{(i,k)} o^{(k')})\big] = \mathbb{E}_{\boldsymbol{\theta}\sim\Theta}\big[\operatorname{Tr}\big(U(\boldsymbol{\theta})\sigma(\boldsymbol{x}^{(i,k)})U(\boldsymbol{\theta})^\dagger (o^{(k')}\otimes \mathbb{I}_{2^{N-D}})\big)\big] = \int_{\rm Haar} d\mu(U)\,\operatorname{Tr}\big(U\sigma(\boldsymbol{x}^{(i,k)})U^\dagger (o^{(k')}\otimes \mathbb{I}_{2^{N-D}})\big) = \frac{\operatorname{Tr}(o^{(k')})\, 2^{N-D}}{2^N} = \frac{\operatorname{Tr}(o^{(k')})}{2^D}, \qquad (B6)$$
+ where the first equality comes from the explicit form of QC in Eq. (4), the second equality uses the fact that $U$ follows the Haar distribution, and the second-to-last equality comes from Lemma 4.
+ We then quantify the variance of $\operatorname{Tr}(\rho^{(i,k)} o^{(k')})$, i.e.,
+ $$\operatorname{Var}_{\boldsymbol{\theta}\sim\Theta}\big[\operatorname{Tr}(\rho^{(i,k)} o^{(k')})\big] = \mathbb{E}_{\boldsymbol{\theta}\sim\Theta}\big[\operatorname{Tr}(\rho^{(i,k)} o^{(k')})^2\big] - \big(\mathbb{E}_{\boldsymbol{\theta}\sim\Theta}\big[\operatorname{Tr}(\rho^{(i,k)} o^{(k')})\big]\big)^2$$
+ $$= \int_{\rm Haar} d\mu(U)\,\operatorname{Tr}\big(U\sigma(\boldsymbol{x}^{(i,k)})U^\dagger (o^{(k')}\otimes \mathbb{I}_{2^{N-D}})\big)^2 - \frac{\operatorname{Tr}(o^{(k')})^2}{2^{2D}}$$
+ $$= \frac{1}{2^{2N}-1}\Big(\operatorname{Tr}(\sigma(\boldsymbol{x}^{(i,k)}))^2\operatorname{Tr}(o^{(k')}\otimes \mathbb{I}_{2^{N-D}})^2 + \operatorname{Tr}(\sigma(\boldsymbol{x}^{(i,k)})^2)\operatorname{Tr}((o^{(k')}\otimes \mathbb{I}_{2^{N-D}})^2)\Big)$$
+ $$\quad - \frac{1}{2^N(2^{2N}-1)}\Big(\operatorname{Tr}(\sigma(\boldsymbol{x}^{(i,k)})^2)\operatorname{Tr}(o^{(k')}\otimes \mathbb{I}_{2^{N-D}})^2 + \operatorname{Tr}(\sigma(\boldsymbol{x}^{(i,k)}))^2\operatorname{Tr}((o^{(k')}\otimes \mathbb{I}_{2^{N-D}})^2)\Big) - \frac{\operatorname{Tr}(o^{(k')})^2}{2^{2D}}$$
+ $$\le \frac{1}{2^{2N}-1}\Big(\operatorname{Tr}(o^{(k')}\otimes \mathbb{I}_{2^{N-D}})^2 + \operatorname{Tr}((o^{(k')}\otimes \mathbb{I}_{2^{N-D}})^2)\Big) - \frac{\operatorname{Tr}(o^{(k')})^2}{2^{2D}}$$
+ $$= \frac{1}{2^{2N}-1}\Big(\operatorname{Tr}(o^{(k')})^2\, 2^{2N-2D} + \operatorname{Tr}((o^{(k')})^2)\, 2^{N-D}\Big) - \frac{\operatorname{Tr}(o^{(k')})^2}{2^{2D}}$$
+ $$\le \frac{\operatorname{Tr}(o^{(k')})^2 + \operatorname{Tr}((o^{(k')})^2)}{2^{2D-1}} - \frac{\operatorname{Tr}(o^{(k')})^2}{2^{2D}} = \frac{\operatorname{Tr}(o^{(k')})^2 + 2\operatorname{Tr}((o^{(k')})^2)}{2^{2D}}, \qquad (B7)$$
+ where the second equality uses the fact that $U$ follows the Haar distribution and Eq. (B6), the third equality comes from Lemma 5, the first inequality arises from $\operatorname{Tr}(\sigma)=1$, $\operatorname{Tr}(\sigma^2)\le 1$, and dropping a negative term, the subsequent equality employs $\operatorname{Tr}(A\otimes B) = \operatorname{Tr}(A)\operatorname{Tr}(B)$ and $(A\otimes B)(C\otimes D) = (AC)\otimes(BD)$, and the last inequality exploits $2^{2N}-1 > 2^{2N-1}$ and $N \ge D$.
+ Supported by Chebyshev's inequality, Eqs. (B6) and (B7) indicate
+ $$\Pr\Big(\big|\operatorname{Tr}(\rho^{(i,k)} o^{(k')}) - \mathbb{E}\big[\operatorname{Tr}(\rho^{(i,k)} o^{(k')})\big]\big| \ge \tau\Big) \le \frac{\operatorname{Tr}(o^{(k')})^2 + 2\operatorname{Tr}((o^{(k')})^2)}{2^{2D}\tau^2}.$$
+ Equivalently, with probability $1-\delta$, we have
+ $$\Big|\operatorname{Tr}(\rho^{(i,k)} o^{(k')}) - \frac{\operatorname{Tr}(o^{(k')})}{2^D}\Big| < \sqrt{\frac{\operatorname{Tr}(o^{(k')})^2 + 2\operatorname{Tr}((o^{(k')})^2)}{2^{2D}\delta}}. \qquad (B8)$$
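+ As a sanity check on the moment identities used above, the following small numerical experiment (a sketch, not part of the original derivation) estimates $\operatorname{Tr}(\sigma(\boldsymbol{x}^{(i,k)})\sigma(\boldsymbol{x}))$ over Haar-random pure states and compares the sample mean and variance with Eqs. (B3) and (B4); the qubit number and sample count are illustrative choices.
+
+ import numpy as np
+ from scipy.stats import unitary_group
+
+ N = 4                       # illustrative qubit number
+ d = 2 ** N
+ rng = np.random.default_rng(0)
+
+ # a fixed encoded pure state sigma(x^{(i,k)}), itself drawn at random
+ psi_fixed = unitary_group.rvs(d, random_state=rng)[:, 0]
+
+ # for pure states, Tr[sigma' sigma] = |<psi_fixed|psi>|^2
+ overlaps = np.array([
+     abs(np.vdot(psi_fixed, unitary_group.rvs(d, random_state=rng)[:, 0])) ** 2
+     for _ in range(2000)
+ ])
+
+ print("sample mean:", overlaps.mean(), "vs 1/2^N =", 1 / d)              # Eq. (B3)
+ print("sample var :", overlaps.var(), "vs bound 3/2^(2N) =", 3 / d**2)   # Eq. (B4)
+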
+ SM C: Proof of Lemma 1
+
+ In this section, we derive the geometric properties of the global optimizer under the unconstrained loss function $\mathcal{L}(\rho, o)$, in which both $\rho$ and $o$ are tunable and the regularization term is considered. Mathematically, the regularizer in Eq. (1) is defined as $\mathsf{E} = \frac{\lambda_\rho}{2}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\|\rho^{(i,k)}\|_F^2 + \frac{\lambda_o}{2}\sum_{k=1}^{K}\|o^{(k)}\|_F^2$, with $\lambda_\rho$ and $\lambda_o$ being hyper-parameters. The explicit form of the loss function is
+ $$\mathcal{L}(\rho, o) = \frac{1}{2n}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\Big\|\big[\operatorname{Tr}(\rho^{(i,k)} o^{(j)})\big]_{j=1:K} - \boldsymbol{y}^{(i,k)}\Big\|^2 + \frac{\lambda_\rho}{2}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\|\rho^{(i,k)}\|_F^2 + \frac{\lambda_o}{2}\sum_{j=1}^{K}\|o^{(j)}\|_F^2. \qquad (C1)$$
+ Denote the global optimum as $(\rho^*, o^*) = \arg\min_{\rho, o}\mathcal{L}(\rho, o)$ and the empirical QC as $\hat{h}_Q \equiv h_Q(\rho^*, o^*)$. The restatement of Lemma 1 is as follows.
+ Lemma (Formal statement of Lemma 1). Define $C_1 := K\sqrt{n_c\lambda_o\lambda_\rho}$. If $2^D \ge K$, $C_1 \le 1$, and $\lambda_o \le n_c\lambda_\rho$, the global minimizer $(\rho^*, o^*)$ of $\mathcal{L}(\rho, o)$ in Eq. (C1) satisfies, for $\forall k, k' \in [K]$:
+ $$\text{(i)}\ \bar{\rho}^{*(k)} := \rho^{*(1,k)} = \cdots = \rho^{*(n_c,k)}; \quad \text{(ii)}\ \operatorname{Tr}(\bar{\rho}^{*(k)}\bar{\rho}^{*(k')}) = (1-C_1)\sqrt{\frac{\lambda_o}{n_c\lambda_\rho}}\,\delta_{k,k'}; \quad \text{(iii)}\ o^{*(k)} = \sqrt{\frac{n_c\lambda_\rho}{\lambda_o}}\,\bar{\rho}^{*(k)}. \qquad (C2)$$
+ The corresponding empirical risk is $R_{\rm ERM}(\hat{h}_Q) = C_1^2/2$.
+ Proof of Lemma 1. Conceptually, the global optimizer can be identified by lower bounding $\mathcal{L}(\rho, o)$, where the equality conditions amount to the properties of the global minimizer. In particular, the lower bound of $\mathcal{L}(\rho, o)$ yields
+ $$\frac{1}{2Kn_c}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\Big\|\big[\operatorname{Tr}(\rho^{(i,k)} o^{(j)})\big]_{j=1:K} - \boldsymbol{y}^{(i,k)}\Big\|^2 + \frac{\lambda_\rho}{2}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\|\rho^{(i,k)}\|_F^2 + \frac{\lambda_o}{2}\sum_{j=1}^{K}\|o^{(j)}\|_F^2$$
+ $$\ge \frac{1}{2Kn_c}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\big(\operatorname{Tr}(\rho^{(i,k)} o^{(k)}) - 1\big)^2 + \frac{\lambda_\rho}{2}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\|\rho^{(i,k)}\|_F^2 + \frac{\lambda_o}{2}\sum_{j=1}^{K}\|o^{(j)}\|_F^2$$
+ $$\ge \frac{1}{2K}\sum_{k=1}^{K}\Big(\operatorname{Tr}\Big(\sum_{i=1}^{n_c}\frac{1}{n_c}\rho^{(i,k)}\, o^{(k)}\Big) - 1\Big)^2 + \frac{\lambda_\rho}{2}\sum_{k=1}^{K} n_c\Big\|\sum_{i=1}^{n_c}\frac{1}{n_c}\rho^{(i,k)}\Big\|_F^2 + \frac{\lambda_o}{2}\sum_{j=1}^{K}\|o^{(j)}\|_F^2, \qquad (C3)$$
+ where the first inequality uses the fact $\|a - b\|^2 = \sum_i (a(i) - b(i))^2 \ge (a(k) - b(k))^2$ together with the fact that the $k$-th entry of $\boldsymbol{y}^{(i,k)}$ equals 1, and the second inequality comes from Jensen's inequality, $f(\mathbb{E}(x)) \le \mathbb{E}(f(x))$. The equality condition of the first inequality holds if and only if
+ $$\operatorname{Tr}(\rho^{(i,k)} o^{(j)}) = 0, \quad (\forall j \in [K]\setminus\{k\}) \wedge (\forall i \in [n_c]);$$
+ and the equality condition of the second inequality holds if and only if
+ $$\rho^{(1,k)} = \cdots = \rho^{(i,k)} = \cdots = \rho^{(n_c,k)}, \quad \forall k \in [K].$$
+ Denote the mean of the feature states for the $k$-th class as $\bar{\rho}^{(k)} = \sum_{i=1}^{n_c}\frac{1}{n_c}\rho^{(i,k)}$ for $\forall k\in[K]$. The above two equality conditions suggest that the global minimizer $(\rho^*, o^*)$ satisfies
+ $$\bar{\rho}^{*(k)} \equiv \rho^{*(1,k)} = \cdots = \rho^{*(n_c,k)}, \quad \forall k\in[K]; \qquad \operatorname{Tr}(\bar{\rho}^{*(k)} o^{*(j)}) = 0, \quad \forall j\in[K]\setminus\{k\}. \qquad (C4)$$
+ To this end, we obtain Condition (i) in Lemma 1, which describes the geometric properties of $\rho^*$, i.e.,
+ $$\text{(i)}\ \bar{\rho}^{*(k)} := \rho^{*(1,k)} = \cdots = \rho^{*(n_c,k)}. \qquad (C5)$$
+ The next part of the proof shows that the global minimizer satisfies Condition (iii). Combining Eqs. (C3) and (C4), the lower bound of the loss function in Eq. (C3) follows
+ $$\mathcal{L}(\rho, o) \ge \frac{1}{2K}\sum_{k=1}^{K}\big(\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) - 1\big)^2 + \frac{\lambda_\rho}{2}\sum_{k=1}^{K} n_c\|\bar{\rho}^{(k)}\|_F^2 + \frac{\lambda_o}{2}\sum_{j=1}^{K}\|o^{(j)}\|_F^2$$
+ $$\ge \frac{1}{2}\Big(\sum_{k=1}^{K}\frac{1}{K}\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) - 1\Big)^2 + \frac{\lambda_\rho}{2} K n_c\Big\|\sum_{k=1}^{K}\frac{1}{K}\bar{\rho}^{(k)}\Big\|_F^2 + \frac{\lambda_o}{2} K\Big\|\sum_{j=1}^{K}\frac{1}{K} o^{(j)}\Big\|_F^2, \qquad (C6)$$
+ where the second inequality comes from Jensen's inequality, and the equality condition holds if and only if, for $\forall k, k'\in[K]$,
+ $$\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) = \operatorname{Tr}(\bar{\rho}^{(k')} o^{(k')}), \quad \|\bar{\rho}^{(k)}\|_F = \|\bar{\rho}^{(k')}\|_F, \quad \|o^{(k)}\|_F = \|o^{(k')}\|_F. \qquad (C7)$$
+ Then, supported by the inequality $a + b \ge 2\sqrt{ab}$, the loss $\mathcal{L}(\rho, o)$ can be further lower bounded by
+ $$\frac{1}{2}\big(\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) - 1\big)^2 + \frac{\lambda_\rho}{2} K n_c\|\bar{\rho}^{(k)}\|_F^2 + \frac{\lambda_o}{2} K\|o^{(k)}\|_F^2 \ge \frac{1}{2}\big(\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) - 1\big)^2 + K\sqrt{n_c\lambda_o\lambda_\rho}\,\|\bar{\rho}^{(k)}\|_F\|o^{(k)}\|_F, \qquad (C8)$$
+ where the equality condition holds if and only if
+ $$\lambda_o\|o^{(k)}\|_F^2 = n_c\lambda_\rho\|\bar{\rho}^{(k)}\|_F^2, \quad \forall k\in[K]. \qquad (C9)$$
+ Note that the requirements $C_1 \le 1$ and $\lambda_o \le n_c\lambda_\rho$ in Lemma 1 imply $\|\bar{\rho}^{*(k)}\| \le 1$ and hence ensure that $\bar{\rho}^{*(k)}$ is a meaningful quantum state for $\forall k\in[K]$.
+ Since $\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) = \|\bar{\rho}^{(k)}\|\|o^{(k)}\|\cos(\angle(\bar{\rho}^{(k)}, o^{(k)}))$, the lower bound of $\mathcal{L}(\rho, o)$ in Eq. (C8) is equivalent to
+ $$\frac{1}{2}\big(\|\bar{\rho}^{(k)}\|\|o^{(k)}\|\cos(\angle(\bar{\rho}^{(k)}, o^{(k)})) - 1\big)^2 + C_1\|\bar{\rho}^{(k)}\|_F\|o^{(k)}\|_F.$$
+ Define $\|\bar{\rho}^{(k)}\|\|o^{(k)}\| = a$ and $\angle(\bar{\rho}^{(k)}, o^{(k)}) = \alpha$. The above expression is described by the function $f(a, \alpha) = (a\cos\alpha - 1)^2/2 + C_1 a$, whose minimum is $C_1 - C_1^2/2$, attained at $\alpha^* = 0$ and $a^* = 1 - C_1$. The derivation is as follows. Since $a > 0$ is unbounded from above, we first consider the case $0 < a < 1$, where the minimum of $f(a, \alpha)$ is $C_1 - C_1^2/2$ with $\alpha^* = 0$ and $a^* = 1 - C_1$. Otherwise, when $a \ge 1$, the minimum of $f(a, \alpha)$ is $C_1$ with $\alpha^* = \arccos(1/a)$ and $a^* = 1$. Since the minimum value in the second case is always larger than that of the first case, the global minimum of $f(a, \alpha)$ is $C_1 - C_1^2/2$ with $\alpha^* = 0$ and $a^* = 1 - C_1$. Combining the observation that $\bar{\rho}^{*(k)}$ and $o^{*(k)}$ point in the same direction with Eq. (C9), we achieve Condition (iii), i.e.,
+ $$o^{*(k)} = \sqrt{\frac{n_c\lambda_\rho}{\lambda_o}}\,\bar{\rho}^{*(k)}.$$
+ The last part proves Condition (ii). Combining the result $\|\bar{\rho}^{*(k)}\|\|o^{*(k)}\| = 1 - C_1$ for $\forall k\in[K]$ with Eq. (C4) and Condition (iii), we immediately obtain Condition (ii), i.e.,
+ $$\text{(ii)}\ \sqrt{\frac{n_c\lambda_\rho}{\lambda_o}}\,\|\bar{\rho}^{*(k)}\|\|\bar{\rho}^{*(k')}\| = (1 - C_1)\delta_{k,k'} \;\Rightarrow\; \operatorname{Tr}(\bar{\rho}^{*(k)}\bar{\rho}^{*(k')}) = (1 - C_1)\sqrt{\frac{\lambda_o}{n_c\lambda_\rho}}\,\delta_{k,k'}. \qquad (C10)$$
+ To summarize, given the global optimum satisfying the above three conditions, the corresponding empirical risk is
+ $$R_{\rm ERM}(\hat{h}_Q) = \frac{1}{2n}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\Big\|\big[\operatorname{Tr}(\rho^{*(i,k)} o^{*(j)})\big]_{j=1:K} - \boldsymbol{y}^{(i,k)}\Big\|^2 = \frac{C_1^2}{2}. \qquad (C11)$$
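+ The minimization of $f(a, \alpha)$ above is elementary but easy to get wrong; the following short numerical check (a sketch with an arbitrary illustrative value of $C_1$) confirms that the grid minimizer of $f(a, \alpha) = (a\cos\alpha - 1)^2/2 + C_1 a$ matches the claimed $\alpha^* = 0$, $a^* = 1 - C_1$, and minimum value $C_1 - C_1^2/2$.
+
+ import numpy as np
+
+ C1 = 0.3                                    # any value with 0 < C1 <= 1
+ f = lambda a, alpha: 0.5 * (a * np.cos(alpha) - 1) ** 2 + C1 * a
+
+ a_grid = np.linspace(1e-4, 3.0, 2001)
+ alpha_grid = np.linspace(0.0, np.pi / 2, 2001)
+ A, AL = np.meshgrid(a_grid, alpha_grid, indexing="ij")
+ vals = f(A, AL)
+ i, j = np.unravel_index(vals.argmin(), vals.shape)
+
+ print("grid minimizer: a* =", a_grid[i], ", alpha* =", alpha_grid[j])
+ print("grid minimum  :", vals[i, j])
+ print("claimed       : a* =", 1 - C1, ", alpha* = 0, value =", C1 - C1**2 / 2)
+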
+ SM D: Results related to Lemma 2
+
+ This section is composed of two parts. In SM D 1, we present the proof of Lemma 2. In SM D 2, we explain that the requirements in Lemma 2 are mild.
+
+ 1. Proof of Lemma 2
+
+ Different from Lemma 1, here we focus on the setting in which the regularization term is set to $\mathsf{E} = 0$ and the operators $o$ are predefined. The explicit form of the loss function $\mathcal{L}$ is defined in Eq. (A1). Denoting the optimal feature states by $\rho^* = \arg\min_\rho \mathcal{L}(\rho)$, we quantify the value of $R_{\rm ERM}(\hat{h}_Q)$ with $\hat{h}_Q \equiv h_Q(\rho^*)$.
+ We emphasize that the modifications of $\mathsf{E}$ and $o$ allow a lower optimal empirical risk. Recall the results of Lemma 1. In the most general case, the optimal empirical risk depends on the regularization term, i.e., $R_{\rm ERM}(\hat{h}_Q) \to C_1^2/2$. The dependence on $C_1$ motivates us to explore the empirical risk of QC when $\mathsf{E} = 0$. Furthermore, Condition (iii) in Lemma 1 delivers the crucial property of the optimal measure operators: they are orthogonal to each other. This property helps construct more effective QCs. Instead of being optimized, the measure operators $o$ can be predefined by inheriting the properties proved in Lemma 1; that is, $o$ is required to span the space $\mathbb{C}^{2^D\times 2^D}$ and satisfy $\operatorname{Tr}(o^{(k)} o^{(k')}) = B\delta_{k,k'}$, with $B \ge 1$ being a constant. Notably, these requirements are mild, covering frequently used measurements such as computational-basis and Pauli-based measurements, as explained in SM D 2.
+ Lemma (Formal statement of Lemma 2). Suppose that the adopted measure operators $o$ span the space $\mathbb{C}^{2^D\times 2^D}$ and satisfy $\operatorname{Tr}(o^{(k)} o^{(k')}) = B\delta_{k,k'}$, where $B \ge 1$ is a constant. The empirical risk of $\hat{h}_Q$ is $R_{\rm ERM}(\hat{h}_Q) = 0$ when the global minimizer $\rho^*$ satisfies
+ $$\text{(i)}\ \bar{\rho}^{*(k)} := \rho^{*(1,k)} = \cdots = \rho^{*(n_c,k)}; \quad \text{(ii)}\ \operatorname{Tr}(\bar{\rho}^{*(k)}\bar{\rho}^{*(k')}) = \frac{1}{B}\delta_{k,k'}; \quad \text{(iii)}\ \operatorname{Tr}(\bar{\rho}^{*(k)} o^{(k')}) = \delta_{k,k'}. \qquad (D1)$$
+ Proof of Lemma 2. The concept of the proof is analogous to Lemma 1: the global optimizer is identified by lower bounding the loss $\mathcal{L}(\rho)$. To this end, the lower bound of $\mathcal{L}(\rho)$ yields
+ $$\frac{1}{2Kn_c}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\Big\|\big[\operatorname{Tr}(\rho^{(i,k)} o^{(j)})\big]_{j=1:K} - \boldsymbol{y}^{(i,k)}\Big\|^2 \ge \frac{1}{2Kn_c}\sum_{i=1}^{n_c}\sum_{k=1}^{K}\big(\operatorname{Tr}(\rho^{(i,k)} o^{(k)}) - 1\big)^2 \ge \frac{1}{2K}\sum_{k=1}^{K}\Big(\operatorname{Tr}\Big(\sum_{i=1}^{n_c}\frac{1}{n_c}\rho^{(i,k)}\, o^{(k)}\Big) - 1\Big)^2, \qquad (D2)$$
+ where the first inequality uses the facts $n = Kn_c$ and $\|a - b\|^2 = \sum_i(a(i)-b(i))^2 \ge (a(k)-b(k))^2$, together with the fact that only the $k$-th entry of $\boldsymbol{y}^{(i,k)}$ equals 1, and the second inequality comes from Jensen's inequality, $\mathbb{E}(f(x)) \ge f(\mathbb{E}(x))$ when $f(\cdot)$ is convex. Note that the equality condition of the first inequality holds if and only if
+ $$\operatorname{Tr}(\rho^{(i,k)} o^{(j)}) = 0, \quad (\forall j\in[K]\setminus\{k\}) \wedge (\forall i\in[n_c]);$$
+ and the equality condition of the second inequality holds if and only if
+ $$\rho^{(1,k)} = \cdots = \rho^{(i,k)} = \cdots = \rho^{(n_c,k)}, \quad \forall k\in[K].$$
+ Denote the mean of the feature states for the $k$-th class as $\bar{\rho}^{(k)} = \sum_{i=1}^{n_c}\frac{1}{n_c}\rho^{(i,k)}$ for $\forall k\in[K]$. The above two equality conditions suggest that the global minimizer yields
+ $$\bar{\rho}^{*(k)} \equiv \rho^{*(1,k)} = \cdots = \rho^{*(n_c,k)}, \quad \forall k\in[K]; \qquad (D3)$$
+ $$\operatorname{Tr}(\bar{\rho}^{*(k)} o^{(j)}) = 0, \quad \forall j\in[K]\setminus\{k\}. \qquad (D4)$$
+ Combining Eqs. (D2)-(D4), the lower bound of the loss function $\mathcal{L}(\rho)$ satisfies
+ $$\frac{1}{2K}\sum_{k=1}^{K}\big(\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) - 1\big)^2 \ge \frac{1}{2}\Big(\sum_{k=1}^{K}\frac{1}{K}\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) - 1\Big)^2, \qquad (D5)$$
+ where the inequality comes from Jensen's inequality and the equality condition holds if and only if, $\forall k, k'\in[K]$,
+ $$\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) = \operatorname{Tr}(\bar{\rho}^{(k')} o^{(k')}). \qquad (D6)$$
+ Supported by Eq. (D6), we can further lower bound $\mathcal{L}(\rho)$ with
+ $$\frac{1}{2}\big(\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) - 1\big)^2 \ge 0, \qquad (D7)$$
+ where the equality condition is achieved when $\operatorname{Tr}(\bar{\rho}^{(k)} o^{(k)}) = 1$ for $\forall k\in[K]$.
+ Taken together, the global optimizer $\rho^*$ should satisfy Conditions (i) & (iii) in Lemma 2, where
+ $$\text{(i)}\ \bar{\rho}^{*(k)} := \rho^{*(1,k)} = \cdots = \rho^{*(n_c,k)}; \quad \text{(iii)}\ \operatorname{Tr}(\bar{\rho}^{*(k)} o^{(k')}) = \delta_{k,k'}. \qquad (D8)$$
+ We last prove that Condition (iii) and the requirements on $o$ lead to Condition (ii). In particular, denote the vectorizations of $\bar{\rho}^{*(k)}$ and $o^{(k)}$ as $|\bar{\rho}^{*(k)}\rangle\rangle$ and $|o^{(k)}\rangle\rangle$, respectively. Condition (iii) can be rewritten as
+ $$\langle\langle\bar{\rho}^{*(k)}|o^{(k')}\rangle\rangle = \delta_{k,k'}. \qquad (D9)$$
+ Moreover, since the set of measure operators $\{o^{(k)}\}$ is required to be complete in the space $\mathbb{C}^{2^D\times 2^D}$ and $\operatorname{Tr}(o^{(k)} o^{(k')}) = B\delta_{k,k'}$ with $B \ge 1$ for $\forall k, k'$, we have
+ $$\sum_k |o^{(k)}\rangle\rangle\langle\langle o^{(k)}| = B\,\mathbb{I}_{2^{2D}}.$$
+ Then, Condition (ii) can be derived as follows:
+ $$\operatorname{Tr}(\bar{\rho}^{*(k)}\bar{\rho}^{*(k')}) = \langle\langle\bar{\rho}^{*(k)}|\mathbb{I}_{2^{2D}}|\bar{\rho}^{*(k')}\rangle\rangle = \frac{1}{B}\langle\langle\bar{\rho}^{*(k)}|\Big(\sum_{k''}|o^{(k'')}\rangle\rangle\langle\langle o^{(k'')}|\Big)|\bar{\rho}^{*(k')}\rangle\rangle$$
+ $$= \frac{1}{B}\Big(\langle\langle\bar{\rho}^{*(k)}|o^{(k)}\rangle\rangle\langle\langle o^{(k)}|\bar{\rho}^{*(k')}\rangle\rangle + \sum_{k''\neq k}\langle\langle\bar{\rho}^{*(k)}|o^{(k'')}\rangle\rangle\langle\langle o^{(k'')}|\bar{\rho}^{*(k')}\rangle\rangle\Big) = \frac{1}{B}\delta_{k,k'}. \qquad (D10)$$
+
+ 2. Requirements on o used in Lemma 2
+
+ Here we elucidate that the requirements adopted in Lemma 2, i.e., that $o$ spans the complex space $\mathbb{C}^{2^D\times 2^D}$ and satisfies $\operatorname{Tr}(o^{(k)} o^{(k')}) = B\delta_{k,k'}$ with $B \ge 1$, are mild. Specifically, the measurements employed in most QNN-based classifiers satisfy these requirements, including computational-basis measurements and Pauli measurements.
+ Computational-basis measurements. In this setting, the local measurement $o^{(k)}$ is set as $|k\rangle\langle k|$, with $|k\rangle$ being the $k$-th computational basis state for $\forall k\in[K]$. When $2^D = K$, $\{|k\rangle\}$ spans the whole space $\mathbb{C}^{2^D\times 2^D}$ and we have $\operatorname{Tr}(o^{(k)} o^{(k')}) = (\langle k|k'\rangle)^2 = \delta_{k,k'}$, i.e., $B = 1$. The assumptions are satisfied.
+ Pauli measurements. Denote the Pauli operator applied to the $i$-th qubit as $P_a^{(i)}$ with $a\in\{X, Y, Z, I\}$ for $\forall i\in[D]$. Then, there are in total $4^D$ Pauli strings $P = \otimes_{i=1}^{D} P_a^{(i)}$, which form an orthogonal basis for the space $\mathbb{C}^{2^D\times 2^D}$. Setting $2^D = K$, each $o^{(k)}$ corresponds to one Pauli string with $\operatorname{Tr}(o^{(k)} o^{(k')}) = K\delta_{k,k'}$, i.e., $B = K$.
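+ The orthogonality relation for Pauli strings used above, $\operatorname{Tr}(P P') = 2^D\,\delta_{P,P'}$, can be checked directly; the following sketch (with the illustrative choice $D = 2$) builds all $4^D$ Pauli strings and verifies that their Gram matrix under the trace inner product is $2^D$ times the identity.
+
+ import itertools, functools
+ import numpy as np
+
+ I2 = np.eye(2, dtype=complex)
+ X = np.array([[0, 1], [1, 0]], dtype=complex)
+ Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
+ Z = np.array([[1, 0], [0, -1]], dtype=complex)
+ paulis = {"I": I2, "X": X, "Y": Y, "Z": Z}
+
+ D = 2                                               # illustrative number of qubits
+ ops = [functools.reduce(np.kron, (paulis[c] for c in s))
+        for s in itertools.product("IXYZ", repeat=D)]
+
+ gram = np.array([[np.trace(P @ Q).real for Q in ops] for P in ops])
+ print(np.allclose(gram, (2 ** D) * np.eye(4 ** D)))  # True: Tr(P P') = 2^D delta
+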
+ SM E: Proof of Lemma 3
+
+ For clarity, let us restate Lemma 3 below and introduce the proof sketch before presenting the proof details.
+ Lemma (Formal statement of Lemma 3). Denote by $L_1$ the Lipschitz constant of $\ell$ in Eq. (1) with respect to $h$. Given a QC defined in Eq. (3), let $\mathcal{E}$ be a quantum channel with
+ $$h_Q(\boldsymbol{x}, U(\boldsymbol{\theta}), o^{(k)}) \equiv \operatorname{Tr}\big(o^{(k)}\mathcal{E}(\sigma(\boldsymbol{x}))\big), \quad \forall k\in[K]. \qquad (E1)$$
+ Suppose the measure operators follow $\max_{k\in[K]}\|o^{(k)}\| \le C_2$. The explicit form of the encoding unitary follows $U_E(\boldsymbol{x}) = \prod_{g=1}^{N_g} u_g(\boldsymbol{x}) \in \mathcal{U}(2^N)$, where the $g$-th quantum gate $u_g(\boldsymbol{x}) \in \mathcal{U}(2^m)$ operates on at most $m$ qubits with $m \le N$, and the $N_g$ gates consist of $N_{ge}$ variational gates and $N_g - N_{ge}$ fixed gates.
+ Following the above notations and Definition 1, the empirical QC is $\big(K(\frac{28N_{ge}}{\epsilon})^{4^m N_{ge}}, 4L_1KC_2\epsilon\big)$-robust, and with probability $1-\delta$ its generalization error yields
+ $$R_{\rm Gene}(\hat{h}) \le 4L_1KC_2\epsilon + 3\xi(\hat{h})\sqrt{\frac{|T_{\mathcal{D}}|\,4^m N_{ge}\ln(56KN_{ge}/(\epsilon\delta))}{n}} + \xi(\hat{h})\,\frac{2|T_{\mathcal{D}}|\,4^m N_{ge}\ln(56KN_{ge}/(\epsilon\delta))}{n},$$
+ where $L_1$ is the Lipschitz constant of $\ell$ with respect to $h$, $\mathcal{I}_r^{\mathcal{D}} = \{i\in[n]: \boldsymbol{z}^{(i)}\in\mathcal{C}_r\}$, $\xi(\hat{h}) := \max_{\boldsymbol{z}\in\mathcal{Z}}\ell(\hat{h}, \boldsymbol{z})$, and $T_{\mathcal{D}} := \{r\in[R]: |\mathcal{I}_r^{\mathcal{D}}| \ge 1\}$.
+ The proof of Lemma 3 is established on the following lemma, which leverages algorithmic robustness to quantify the upper bound of the generalization error.
+ Lemma 6 (Theorem 1, [105]). If the learning algorithm $\mathcal{A}$ is $(R, \nu(\cdot))$-robust with $\{\mathcal{C}_r\}_{r=1}^{R}$, then for any $\delta > 0$, with probability at least $1-\delta$ over an i.i.d. draw of $n$ samples $\mathcal{D} = \{\boldsymbol{z}^{(i)}\}_{i=1}^{n}$ with $\boldsymbol{z}^{(i)} = (\boldsymbol{x}^{(i)}, y^{(i)})$, the hypothesis $\hat{h}$ returned by $\mathcal{A}$ on $\mathcal{D}$ satisfies
+ $$R_{\rm Gene}(\hat{h}) \le \nu(\mathcal{D}) + \xi(\hat{h})\Big((\sqrt{2}+1)\sqrt{\frac{|T_{\mathcal{D}}|\ln(2R/\delta)}{n}} + \frac{2|T_{\mathcal{D}}|\ln(2R/\delta)}{n}\Big), \qquad (E2)$$
+ where $\mathcal{I}_r^{\mathcal{D}} = \{i\in[n]: \boldsymbol{z}^{(i)}\in\mathcal{C}_r\}$, $\xi(\hat{h}) := \max_{\boldsymbol{z}\in\mathcal{Z}}\ell(\hat{h}, \boldsymbol{z})$, and $T_{\mathcal{D}} := \{r\in[R]: |\mathcal{I}_r^{\mathcal{D}}| \ge 1\}$.
+ The above result hints that, given a hypothesis $\hat{h}$, its generalization error is upper bounded via the disjoint sets $\{\mathcal{C}_r\}_{r=1}^{R}$, where a lower cardinality $R$ allows a lower generalization error. A natural approach to realizing these disjoint partitions is the covering number [70].
+ Definition 2 (Covering number, [65]). Given a metric space $(\mathcal{U}, \|\cdot\|)$, the covering number $\mathcal{N}(\mathcal{U}, \epsilon, \|\cdot\|)$ denotes the least cardinality of any subset $\mathcal{V}\subset\mathcal{U}$ that covers $\mathcal{U}$ at scale $\epsilon$ with norm $\|\cdot\|$, i.e., $\sup_{A\in\mathcal{U}}\min_{B\in\mathcal{V}}\|A - B\| \le \epsilon$.
+ In conjunction with Lemma 6 and Definition 2, the analysis of $R_{\rm Gene}(\hat{h})$ of an $N$-qubit QC amounts to quantifying the covering number of the space of input quantum states, i.e.,
+ $$\mathcal{X}_Q = \big\{U_E(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N}U_E(\boldsymbol{x})^\dagger \,\big|\, \boldsymbol{x}\in\mathcal{X}\big\}. \qquad (E3)$$
+ The following lemma connects the robustness and the covering number of $\mathcal{X}_Q$ for QCs; its proof is provided in SM E 1.
+ Lemma 7. Following the settings in Eqs. (E1)-(E3), the corresponding QC is $\big(K(\frac{28N_{ge}}{\epsilon})^{4^m N_{ge}}, 4L_1KC_2\|\mathcal{E}\|_\diamond\epsilon\big)$-robust.
+ We are now ready to prove Lemma 3.
+ Proof of Lemma 3. The generalization error bound can be acquired by combining Lemmas 6 and 7, i.e.,
+ $$R_{\rm Gene}(\hat{h}) \le 4L_1KC_2\|\mathcal{E}\|_\diamond\epsilon + \xi(\hat{h})\Big((\sqrt{2}+1)\sqrt{\frac{|T_{\mathcal{D}}|\ln\big(2K(28N_{ge}/\epsilon)^{4^m N_{ge}}/\delta\big)}{n}} + \frac{2|T_{\mathcal{D}}|\ln\big(2K(28N_{ge}/\epsilon)^{4^m N_{ge}}/\delta\big)}{n}\Big)$$
+ $$\le 4L_1KC_2\|\mathcal{E}\|_\diamond\epsilon + \xi(\hat{h})\Big(3\sqrt{\frac{|T_{\mathcal{D}}|\,4^m N_{ge}\ln(56KN_{ge}/(\epsilon\delta))}{n}} + \frac{2|T_{\mathcal{D}}|\,4^m N_{ge}\ln(56KN_{ge}/(\epsilon\delta))}{n}\Big)$$
+ $$\le 4L_1KC_2\epsilon + \xi(\hat{h})\Big(3\sqrt{\frac{|T_{\mathcal{D}}|\,4^m N_{ge}\ln(56KN_{ge}/(\epsilon\delta))}{n}} + \frac{2|T_{\mathcal{D}}|\,4^m N_{ge}\ln(56KN_{ge}/(\epsilon\delta))}{n}\Big), \qquad (E4)$$
+ where $\mathcal{I}_r^{\mathcal{D}} = \{i\in[n]: \boldsymbol{z}^{(i)}\in\mathcal{C}_r\}$, $\xi(\hat{h}) := \max_{\boldsymbol{z}\in\mathcal{Z}}\ell(\hat{h}, \boldsymbol{z})$, and $T_{\mathcal{D}} := \{r\in[R]: |\mathcal{I}_r^{\mathcal{D}}| \ge 1\}$.
+
+ 1. Proof of Lemma 7
+
+ The proof uses the following lemma to quantify the covering number of $\mathcal{X}_Q$; its proof is given in SM E 2.
+ Lemma 8. Following the settings in Eq. (E1), the covering number of $\mathcal{X}_Q$ in Eq. (E3) satisfies
+ $$\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F) \le \Big(\frac{28N_{ge}}{\epsilon}\Big)^{4^m N_{ge}}. \qquad (E5)$$
+ Proof of Lemma 7. When QC is applied to accomplish the $K$-class classification task, the sample space is $\mathcal{Z} = \mathcal{X}_Q \times \mathcal{Y}$ with $\mathcal{Y} = \{1, 2, \ldots, K\}$. Denote $\tilde{\mathcal{X}}_Q$ as the $\epsilon$-cover set of $\mathcal{X}_Q$ with covering number $\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F)$ in Definition 2. Supported by the $\epsilon$-cover set $\tilde{\mathcal{X}}_Q$, the space $\mathcal{X}_Q\times\{i\}$ can be divided into $\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F)$ sets for $\forall i\in[K]$. In other words, we can divide $\mathcal{Z}$ into $K\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F)$ sets, denoted by $\{\mathcal{Z}_i\}_{i=1}^{K\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F)}$.
+ We then utilize the divided sets of $\mathcal{Z}$ to connect the robustness with the covering number according to Definition 1. Given a training example $(\boldsymbol{x}^{(i)}, y^{(i)})$ and a test example $(\boldsymbol{x}, y)$, suppose that the corresponding quantum examples $(\sigma(\boldsymbol{x}^{(i)}), y^{(i)})$ and $(\sigma(\boldsymbol{x}), y)$ are in the same set of $\{\mathcal{Z}_i\}$. For convenience, we abbreviate $\sigma(\boldsymbol{x}^{(i)})$ and $\sigma(\boldsymbol{x})$ as $\sigma^{(i)}$ and $\sigma$, respectively. Following the definition of the covering number, we have
+ $$y^{(i)} = y \quad\text{and}\quad \|\sigma^{(i)} - \sigma\|_F \le 2\epsilon. \qquad (E6)$$
+ Since the encoded state takes the form $\sigma = U_E(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N}U_E(\boldsymbol{x})^\dagger$, we have
+ $$\operatorname{rank}(\sigma^{(i)} - \sigma) \le 2. \qquad (E7)$$
+ Then, in accordance with the definition of robustness, we bound the discrepancy of the loss values for $\sigma^{(i)}$ and $\sigma$, i.e.,
+ $$\big|\ell(h_Q(\sigma^{(i)}), y^{(i)}) - \ell(h_Q(\sigma), y)\big| \le L_1\big\|[\operatorname{Tr}(\mathcal{E}(\sigma^{(i)})o^{(k)})]_{k=1:K} - [\operatorname{Tr}(\mathcal{E}(\sigma)o^{(k)})]_{k=1:K}\big\|_2$$
+ $$\le L_1K\max_{k\in[K]}\big|\operatorname{Tr}(\mathcal{E}(\sigma^{(i)})o^{(k)}) - \operatorname{Tr}(\mathcal{E}(\sigma)o^{(k)})\big| \le L_1K\max_k\|o^{(k)}\|_2\operatorname{Tr}\big(|\mathcal{E}(\sigma^{(i)} - \sigma)|\big) \le 2L_1KC_2\|\mathcal{E}\|_\diamond\|\sigma^{(i)} - \sigma\|_F \le 4L_1KC_2\|\mathcal{E}\|_\diamond\epsilon, \qquad (E8)$$
+ where the first inequality uses the Lipschitz property of the loss function, $\ell(a, b) - \ell(c, d) \le L_1\|a - c\|_2$, and the form of $\mathcal{E}$ in Eq. (E1); the second inequality comes from the definition of the $\ell_2$ norm; the third inequality exploits von Neumann's trace inequality, $|\operatorname{Tr}(AB)| \le \|A\|_p\|B\|_q$ with $1/p + 1/q = 1$, and the linearity of the CPTP map, $\mathcal{E}(\rho) - \mathcal{E}(\sigma) = \mathcal{E}(\rho - \sigma)$; the fourth inequality employs $\max_k\|o^{(k)}\|_2 \le C_2$, the relation $\|\mathcal{E}(\rho - \sigma)\|_1 \le \|\mathcal{E}\|_\diamond\|\rho - \sigma\|_1$, and $\|A\|_1 \le \operatorname{rank}(A)\|A\|_F$; and the last inequality adopts the result in Eq. (E6).
+ The above result exhibits that the learned QC is $(K\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F), 4L_1KC_2\|\mathcal{E}\|_\diamond\epsilon)$-robust. In this regard, the proof can be completed once the upper bound of the covering number $\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F)$ is known. Supported by Lemma 8, we obtain $\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F) \le (\frac{28N_{ge}}{\epsilon})^{4^m N_{ge}}$. Taken together, the learned QC is
+ $$\Big(K\Big(\frac{28N_{ge}}{\epsilon}\Big)^{4^m N_{ge}},\; 4L_1KC_2\|\mathcal{E}\|_\diamond\epsilon\Big)\text{-robust}.$$
+
+ 2. Proof of Lemma 8
+
+ The derivation of the covering number of $\mathcal{X}_Q$ in Eq. (E3) uses the following lemma.
+ Lemma 9 (Lemma 1, [106]). For $0 < \epsilon < 1/10$, the $\epsilon$-covering number of the unitary group $\mathcal{U}(2^m)$ with respect to the Frobenius-norm distance in Definition 2 obeys
+ $$\Big(\frac{3}{4\epsilon}\Big)^{4^m} \le \mathcal{N}(\mathcal{U}(2^m), \epsilon, \|\cdot\|_F) \le \Big(\frac{7}{\epsilon}\Big)^{4^m}. \qquad (E9)$$
+ Proof of Lemma 8. Recall that the input state space is $\mathcal{X}_Q = \{U_E(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N}U_E(\boldsymbol{x})^\dagger\,|\,\boldsymbol{x}\in\mathcal{X}\}$, where the encoding unitary $U_E(\boldsymbol{x}) = \prod_{g=1}^{N_g} u_g(\boldsymbol{x}) \in \mathcal{U}(2^N)$ consists of $N_{ge}$ variational gates and $N_g - N_{ge}$ fixed gates. To quantify the covering number $\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F)$, we define $\tilde{\mathcal{S}}$ as the $\epsilon$-covering set for the unitary group $\mathcal{U}(2^m)$, $\tilde{\mathcal{X}}_Q$ as the $\epsilon'$-covering set of $\mathcal{X}_Q$, and a set
+ $$\tilde{\mathcal{U}}_E := \Big\{\prod_{i\in\{N_{ge}\}} u_i(\boldsymbol{x})\prod_{j\in\{N_g - N_{ge}\}} u_j(\boldsymbol{x})\,\Big|\, u_i(\boldsymbol{x})\in\tilde{\mathcal{S}}\Big\}, \qquad (E10)$$
+ where $u_i$ and $u_j$ denote the variational and fixed quantum gates, respectively. Note that for any encoding circuit $U_E(\boldsymbol{x})$, we can always find a unitary $U_{E,\epsilon}(\boldsymbol{x})\in\tilde{\mathcal{U}}_E$ in which each variational gate is replaced by the nearest element in the covering set $\tilde{\mathcal{S}}$. To this end, following the definition of the covering number, the discrepancy between $U_E(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N}U_E(\boldsymbol{x})^\dagger\in\mathcal{X}_Q$ and $U_{E,\epsilon}(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N}U_{E,\epsilon}(\boldsymbol{x})^\dagger\in\tilde{\mathcal{X}}_Q$ under the Frobenius norm satisfies
+ $$\big\|U_E(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N}U_E(\boldsymbol{x})^\dagger - U_{E,\epsilon}(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N}U_{E,\epsilon}(\boldsymbol{x})^\dagger\big\|_F \le 2\big\|U_E(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N}U_E(\boldsymbol{x})^\dagger - U_{E,\epsilon}(\boldsymbol{x})(|0\rangle\langle 0|)^{\otimes N}U_{E,\epsilon}(\boldsymbol{x})^\dagger\big\| \le 4\|U_E(\boldsymbol{x}) - U_{E,\epsilon}(\boldsymbol{x})\|\,\|(|0\rangle\langle 0|)^{\otimes N}\| \le 4N_{ge}\epsilon, \qquad (E11)$$
+ where the first inequality uses $\|X\|_F \le \operatorname{rank}(X)\|X\|$ together with the relation in Eq. (E7), the second inequality comes from the triangle and Cauchy-Schwarz inequalities, and the last inequality follows $\|U_E(\boldsymbol{x}) - U_{E,\epsilon}(\boldsymbol{x})\| \le N_{ge}\epsilon$ and $\|(|0\rangle\langle 0|)^{\otimes N}\| = 1$. In other words, $\tilde{\mathcal{X}}_Q$ is a $(4N_{ge}\epsilon)$-covering set for $\mathcal{X}_Q$, i.e., $\epsilon' = 4N_{ge}\epsilon$. In conjunction with the observation that there are $|\tilde{\mathcal{S}}|^{N_{ge}}$ combinations of the gates in $\tilde{\mathcal{U}}_E$ and the result of Lemma 9, the cardinality of $\tilde{\mathcal{U}}_E$ is upper bounded by $|\tilde{\mathcal{U}}_E| \le (\frac{7}{\epsilon})^{4^m N_{ge}}$. Accordingly, supported by Eq. (E11), the covering number of $\mathcal{X}_Q$ satisfies
+ $$\mathcal{N}(\mathcal{X}_Q, 4N_{ge}\epsilon, \|\cdot\|_F) \le \Big(\frac{7}{\epsilon}\Big)^{4^m N_{ge}}. \qquad (E12)$$
+ After rescaling $\epsilon \to \epsilon/(4N_{ge})$, we have
+ $$\mathcal{N}(\mathcal{X}_Q, \epsilon, \|\cdot\|_F) \le \Big(\frac{28N_{ge}}{\epsilon}\Big)^{4^m N_{ge}}. \qquad (E13)$$
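+ To make the scaling of Lemma 3 concrete, the following sketch evaluates the right-hand side of Eq. (E4) for illustrative parameter values (not values used in the paper's experiments); it shows how the generalization bound decays as the number of training examples n grows.
+
+ import numpy as np
+
+ # illustrative parameters (assumptions, not taken from the paper)
+ K, N_ge, m = 4, 20, 2          # classes, variational encoding gates, gate locality
+ L1, C2, eps, delta = 1.0, 1.0, 0.01, 0.01
+ xi, T_D = 1.0, K               # loss bound and |T_D| = K under perfect training
+
+ log_term = (4 ** m) * N_ge * np.log(56 * K * N_ge / (eps * delta))
+
+ for n in [10**3, 10**4, 10**5, 10**6]:
+     bound = (4 * L1 * K * C2 * eps
+              + 3 * xi * np.sqrt(T_D * log_term / n)
+              + 2 * xi * T_D * log_term / n)
+     print(f"n = {n:>8d}:  R_Gene bound <= {bound:.4f}")
+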
+ SM F: Implementation of the algorithm to probe potential advantages of QCs
+
+ The expected risk is the principal criterion to quantify the power of a classifier. As a result, to probe whether a QC holds potential advantages over a CC on a specific learning task, the simplest way is to compare their risk curves. Nevertheless, capturing these two risk curves is difficult because of the many flexible hyper-parameter settings needed to initiate a classifier.
+ The theories developed in Theorem 1 and Lemmas 1-3 deliver concrete rules for setting up these hyper-parameters and thus allow an efficient way to estimate the risk curves. In particular, the derived U-shaped risk curve of QCs indicates that the minimum risk of QC is located at a modest size of the hypothesis space $\mathcal{H}_Q$. In other words, the number of trainable parameters $N_t$ should be lower than $O(\operatorname{poly}(N))$, with $N$ being the number of qubits in QC. Moreover, Lemma 3 hints that the generalization error of QC can be well suppressed using a modest number of train examples. As such, if the number of training examples available in $\mathcal{D}$ is tremendous, we can distill a subset from $\mathcal{D}$ to better recognize quantum advantages.
+ The pseudo-code of the proposed method is presented in Alg. 1. To make a fair comparison, the hyper-parameter settings applied to QC and CC, especially those relating to the computational resources, are required to be kept the same. Specifically, in each comparison, the employed loss function, the number of train examples $n$, the number of trainable parameters $N_t$, and the number of epochs $T$ applied to QC and CC should be identical. Note that the learning rate, the adopted optimizer, and the batch size can vary across classifiers to better estimate the empirical hypothesis. To ensure that the collected results of QC span the basin of its risk curve, the employed $W$ settings of $N_t$ can be acquired by uniformly interpolating from $O(1)$ to $O(\operatorname{poly}(N))$. The number of iterations $T$ should ensure the convergence of QC. Once the loss values of QC and CC under $\{n^{(w)}, N_t^{(w)}, T^{(w)}\}_{w=1}^{W}$ are obtained, we can apply certain fitting algorithms to attain their risk curves. A Python sketch of this procedure is given after Alg. 1.
+
+ Algorithm 1: Estimate risk curves of quantum and classical classifiers
+ Data: The train dataset $\mathcal{D}$, the test dataset $\mathcal{D}_{Test}$, QC $h_Q$ associated with the hypothesis space $\mathcal{H}_Q$, CC $h_C$ associated with the hypothesis space $\mathcal{H}_C$, the loss function $\mathcal{L}(\cdot, \cdot)$.
+ Result: The estimated risk curves of QC and CC.
+ Initialization: $W$ tuples of hyper-parameter settings $\{n^{(w)}, N_t^{(w)}, T^{(w)}\}_{w=1}^{W}$, with $n$ being the number of train examples, $N_t$ the number of trainable parameters, and $T$ the number of epochs;
+ for $w = 1$, $w \le W$, $w{+}{+}$ do
+     Initialize the train data $\mathcal{D}^{(w)}$ by distilling $n^{(w)}$ examples from $\mathcal{D}$;
+     # Collect loss dynamics of QC;
+     Minimize the loss function $\mathcal{L}(\cdot, \cdot)$ via gradient descent methods to obtain the empirical quantum classifier $\bar{h}_Q^{(w)} \in \mathcal{H}_Q$ using $\mathcal{D}^{(w)}$ within $T^{(w)}$ epochs and $N_t^{(w)}$ trainable parameters;
+     Record the loss value $\mathcal{L}(\bar{h}_Q^{(w)}, \mathcal{D}_{Test})$;
+     # Collect loss dynamics of CC;
+     Minimize the loss function $\mathcal{L}(\cdot, \cdot)$ via gradient descent methods to obtain the empirical classical classifier $\bar{h}_C^{(w)} \in \mathcal{H}_C$ using $\mathcal{D}^{(w)}$ within $T^{(w)}$ epochs and $N_t^{(w)}$ trainable parameters;
+     Record the loss value $\mathcal{L}(\bar{h}_C^{(w)}, \mathcal{D}_{Test})$;
+ end
+ Fit the loss dynamics $\{\mathcal{L}(\bar{h}_Q^{(w)}, \mathcal{D}_{Test})\}_{w=1}^{W}$ to obtain the estimated risk curve of QC;
+ Fit the loss dynamics $\{\mathcal{L}(\bar{h}_C^{(w)}, \mathcal{D}_{Test})\}_{w=1}^{W}$ to obtain the estimated risk curve of CC.
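+ The loop of Alg. 1 can be organized as in the following Python sketch. The functions build_qc, build_cc, train, and evaluate are hypothetical placeholders for the user's own model constructors, training routine, and test-loss evaluation; only the bookkeeping mirrors Alg. 1.
+
+ import numpy as np
+
+ def estimate_risk_curves(D, D_test, settings, build_qc, build_cc, train, evaluate):
+     """settings: list of (n, N_t, T) tuples; returns test losses per setting."""
+     losses_qc, losses_cc = [], []
+     rng = np.random.default_rng(0)
+     for n, N_t, T in settings:
+         # distill n examples from D (class balance is assumed to be handled by `train`)
+         idx = rng.choice(len(D), size=n, replace=False)
+         D_w = [D[i] for i in idx]
+         # identical n, N_t, T for both classifiers; optimizers may differ
+         h_q = train(build_qc(N_t), D_w, epochs=T)
+         h_c = train(build_cc(N_t), D_w, epochs=T)
+         losses_qc.append(evaluate(h_q, D_test))
+         losses_cc.append(evaluate(h_c, D_test))
+     return np.array(losses_qc), np.array(losses_cc)  # fit these to get risk curves
+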
+ [Figure G.4 appears here; panel (a) shows sample images from three classes, and panel (b) shows the layer-wise circuit $U_l(\theta)$ built from per-qubit $R_Z(\theta^{(l,i,1)}) R_Y(\theta^{(l,i,2)}) R_Z(\theta^{(l,i,3)})$ rotations, repeated $\times L$ times.]
+ FIG. G.4. Visualization of image dataset and hardware-efficient Ansatz. (a) Image instances sampled from the Fashion-MNIST dataset. (b) The circuit architecture of the employed hardware-efficient Ansatz. The label '×L' denotes the layer number, which means repeating the gates in the dashed box L times.
+ SM G: Numerical simulation details
+
+ Dataset. The construction of the parity dataset mainly follows Ref. [98]. Note that this task has also been broadly studied in the field of deep learning to show the limits of deep neural classifiers [107, 108]. The constructed dataset contains in total 64 examples. Each example corresponds to a bit-string of length 6, i.e., $\boldsymbol{x}\in\{0, 1\}^6$. The label of $\boldsymbol{x}$ is assigned to be 1 if the number of '0' entries in $\boldsymbol{x}$ is even; otherwise, the label is 0. We split it into a train dataset and a test dataset with a train-test split ratio of 0.75. The number of train examples in each class is controlled to be the same. For each example, its feature dimension is 10. The image dataset is adapted from Ref. [102]. Specifically, the data from the first nine classes are preserved and the total number of examples is 180. The train-test split ratio is set as 0.5 to construct the train and test datasets. Each example corresponds to an image with 28 × 28 pixels. In the preprocessing stage, we flatten all examples, followed by padding and normalization. Each processed example yields a 10-qubit state with $\boldsymbol{x}\in\mathbb{R}^{2^{10}}$ and $\|\boldsymbol{x}\|_2^2 = 1$. Some examples after preprocessing are illustrated in Fig. G.4(a). A sketch of the parity-dataset construction is given below.
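+ The parity dataset described above can be generated in a few lines; the construction below is a sketch based on the stated recipe (all 64 length-6 bit-strings, label 1 when the number of zeros is even, class-balanced 0.75 train split), with the random seed as an illustrative choice.
+
+ import itertools
+ import numpy as np
+
+ X = np.array(list(itertools.product([0, 1], repeat=6)))   # all 64 bit-strings
+ y = ((X == 0).sum(axis=1) % 2 == 0).astype(int)           # 1 if #zeros is even
+
+ rng = np.random.default_rng(42)
+ train_idx = []
+ for label in (0, 1):                                      # class-balanced split
+     cls = np.flatnonzero(y == label)
+     train_idx.extend(rng.choice(cls, size=int(0.75 * len(cls)), replace=False))
+ train_mask = np.zeros(len(X), dtype=bool)
+ train_mask[train_idx] = True
+
+ X_train, y_train = X[train_mask], y[train_mask]
+ X_test, y_test = X[~train_mask], y[~train_mask]
+ print(X_train.shape, X_test.shape)                        # (48, 6) (16, 6)
+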
+ Construction of QCs. The quantum subroutine of QC consists of the encoding circuit U_E and the Ansatz U(θ). For all learning tasks, the hardware-efficient Ansatz is employed, whose mathematical expression is U(θ) = ∏_{l=1}^{L} U_l(θ). The layout of the hardware-efficient Ansatz follows the layer-wise structure and the gate arrangement at each layer is the same. For ∀l ∈ [L], U_l(θ) = ∏_{i=1}^{N} (RZ(θ^(l,i,1)) RY(θ^(l,i,2)) RZ(θ^(l,i,3))) U_ent, with U_ent being the entanglement layer formed by CNOT gates. Fig. G.4(b) depicts the adopted hardware-efficient Ansatz with L layers.
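+ For illustration, a sketch of one such Ansatz layer in PennyLane follows; the linear CNOT chain in the entanglement layer is our assumption, since the text does not fix the entangling pattern.
+ import pennylane as qml
+
+ def hardware_efficient_ansatz(theta, n_qubits, n_layers):
+     # theta has shape (n_layers, n_qubits, 3), matching θ^(l,i,k)
+     for l in range(n_layers):
+         for i in range(n_qubits):
+             qml.RZ(theta[l, i, 0], wires=i)
+             qml.RY(theta[l, i, 1], wires=i)
+             qml.RZ(theta[l, i, 2], wires=i)
+         for i in range(n_qubits - 1):        # entanglement layer U_ent
+             qml.CNOT(wires=[i, i + 1])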
+ FIG. G.5. Geometric properties of the quantum feature states on the parity dataset. (a) The averaged performance of QC evaluated by M_1 defined in Eq. (G1). The label 'Init-C-k' with k = 1, 2 refers to the value of M_1^(k) at initialization; similarly, the label 'Final-C-k' refers to the value of M_1^(k) when the training of QC is completed. (b) The averaged performance of QC evaluated by M_2 defined in Eq. (G2). The label 'Init-C-1-2' ('Final-C-1-2') refers to the value of M_2 before (after) the training of QC. The label 'L = a' on the x-axis indicates that the layer number of the hardware-efficient Ansatz is a.
+ The encoding methods for the parity dataset classification and the digit image classification are different. The former uses the basis encoding method. Specifically, for a classical example x ∈ {0, 1}^d, the employed encoding unitary is U_E(x)|0⟩^⊗d = |x⟩, which maps x to a 2^d-dimensional quantum state U_E(x)|0⟩^⊗d. The latter uses the amplitude encoding method. Given a normalized image x ∈ ℝ^64 with ∥x∥₂² = 1, the corresponding unitary encodes it into a 6-qubit state with U_E(x)|0⟩^⊗6 = Σ_{j=1}^{64} x_j |j⟩.
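+ Both encoding schemes have direct PennyLane counterparts; the following sketch is illustrative rather than the authors' implementation.
+ import pennylane as qml
+
+ def basis_encode(x_bits, wires):
+     # |x⟩ for a bit-string x (parity dataset)
+     qml.BasisState(x_bits, wires=wires)
+
+ def amplitude_encode(x, wires):
+     # Σ_j x_j |j⟩ for an L2-normalized feature vector (image dataset)
+     qml.AmplitudeEmbedding(x, wires=wires, normalize=True)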
+ The Pauli-based measurement operators are used in learning the Fashion-MNIST dataset. Since the preprocessed dataset contains 9 classes, there are in total 9 measurement operators, i.e., o^(1) = X⊗X⊗I^⊗8, o^(2) = X⊗Y⊗I^⊗8, o^(3) = X⊗Z⊗I^⊗8, o^(4) = Y⊗X⊗I^⊗8, o^(5) = Y⊗Y⊗I^⊗8, o^(6) = Y⊗Z⊗I^⊗8, o^(7) = Z⊗X⊗I^⊗8, o^(8) = Z⊗Y⊗I^⊗8, o^(9) = Z⊗Z⊗I^⊗8.
+ Multilayer Perceptron. To better justify the capability and performance of QCs, we apply the multilayer perceptron (MLP) as the reference [109]. An MLP is composed of an input layer, L hidden layers with L ≥ 1, and an output layer. The dimension of the input layer is equal to the feature dimension of the input. ReLU activations are added in the hidden layers to perform nonlinear transformations. In the output layer, the Softmax activation function is employed. The number of layers L depends on the assigned tuples {n, N_t, T}.
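+ A minimal PyTorch sketch of such a reference MLP is shown below; the hidden width is a free parameter chosen to match the assigned N_t.
+ import torch.nn as nn
+
+ def build_mlp(in_dim, hidden_dim, n_hidden, n_classes):
+     layers, d = [], in_dim
+     for _ in range(n_hidden):                  # L ≥ 1 hidden layers with ReLU
+         layers += [nn.Linear(d, hidden_dim), nn.ReLU()]
+         d = hidden_dim
+     layers += [nn.Linear(d, n_classes), nn.Softmax(dim=-1)]
+     return nn.Sequential(*layers)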
+ Convolutional neural network. In the task of image classification, a convolutional neural network (CNN) is employed as the reference [109]. The employed CNN is formed by two convolutional layers and one fully-connected layer. ReLU activations and pooling operations are added in the hidden layers to perform nonlinear transformations. The number of channels for the first convolutional layer is fixed to 8 and the corresponding kernel size is 9 × 9. The kernel size of the pooling operation applied to the two convolutional layers is 2 × 2. The kernel size of the second convolutional layer is fixed to 5 × 5, but the number of output channels varies depending on the settings in Alg. 1. For the sake of fair comparison, the number of output channels is set as 2, 6, 15, 30, 50, 75, where the corresponding number of parameters is 860, 1284, 2238, 3828, 5948, and 8598, respectively; a minimal sketch of this architecture is given below.
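+ The sketch follows the stated hyper-parameters; the padding, stride, and pooling placement are assumptions not fixed by the text.
+ import torch.nn as nn
+
+ class ReferenceCNN(nn.Module):
+     def __init__(self, out_channels=6, n_classes=9):
+         super().__init__()
+         self.features = nn.Sequential(
+             nn.Conv2d(1, 8, kernel_size=9), nn.ReLU(), nn.MaxPool2d(2),
+             nn.Conv2d(8, out_channels, kernel_size=5), nn.ReLU(), nn.MaxPool2d(2),
+         )
+         self.classifier = nn.LazyLinear(n_classes)   # infers the flattened size
+
+     def forward(self, x):                            # x: (batch, 1, 28, 28)
+         return self.classifier(self.features(x).flatten(1))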
+ Optimizer and other hyper-parameters. The adaptive gradient descent method, named AdaGrad optimizer
2572
+ [110], is used to optimize QCs and MLPs. Compared to the vanilla gradient descent method, AdaGrad permits better
2573
+ performance, since it adapts the learning rate for each feature depending on the estimated geometry of the problem.
2574
+ In the task of parity learning, the initial learning rate is set as η = 0.5 for QC and η = 0.01 for MLP, respectively.
2575
+ For both classifiers, the batch size is fixed to be 4. In the task of image classification, the initial learning rate is set
2576
+ as η = 0.05 for QC and η = 0.01 for CNN, respectively. The batch size for both classifiers is set as 1.
2577
+ Curve fitting method. To capture the risk curve, Alg. 1 requires a curve fitting method. For all experiments, we adopt polynomial fitting to derive the risk curve from the collected results. The least squares method is used to determine the best-fitting functions.
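+ Concretely, this amounts to a least-squares polynomial fit, e.g. with NumPy (the values below are illustrative placeholders, not results from the paper):
+ import numpy as np
+
+ n_params = np.array([180, 360, 540, 720, 900])     # collected settings
+ losses = np.array([0.62, 0.35, 0.21, 0.28, 0.44])  # recorded test losses
+ risk_curve = np.poly1d(np.polyfit(n_params, losses, deg=2))
+ print(risk_curve(600))                             # interpolated risk estimate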
+ Source code. The source code used in numerical simulations will be available at the GitHub repository https://github.com/yuxuan-du/Problem-dependent-power-of-QNNs.
+ FIG. G.6. Train (test) accuracy versus epoch on the parity dataset. (a) Train and test accuracy of QC with a varied layer number. The label 'L = a' indicates that the layer number used in the hardware-efficient Ansatz is a. The solid and dashed lines correspond to the train and test accuracies of QC, respectively. (b) Train and test accuracy of MLP with a varied number of hidden neurons. The label 'h = a' indicates that the number of neurons is a. The solid and dashed lines have the same meaning as those for QC.
+ 1. Simulation results of the binary classification for the parity dataset
+ The feature states before and after training. We explore the geometric properties of feature states when the layer number of the hardware-efficient Ansatz varies from L = 1 to L = 7. Other settings are identical to those introduced in the main text. Condition (i) in Lemma 2 is evaluated by the metric
+ M_1^(k) = Σ_{i=1}^{n_c} ∥ρ^(i,k) − ρ̄^(k)∥,   (G1)
+ where the number of train examples {ρ^(i,k)}_{i=1}^{n_c} belonging to the k-th class is n_c and ρ̄^(k) refers to their class-feature mean. Since parity learning is a binary classification task, Condition (ii) in Lemma 2 is evaluated by
+ M_2 = Tr(ρ̄^(0) ρ̄^(1)).   (G2)
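+ Under these definitions, both metrics are straightforward to compute from the feature-state density matrices; a sketch follows (we assume the Frobenius norm in Eq. (G1), which the text leaves implicit).
+ import numpy as np
+
+ def metric_m1(states, class_mean):
+     # Eq. (G1): spread of same-class feature states around their mean
+     return sum(np.linalg.norm(rho - class_mean) for rho in states)
+
+ def metric_m2(mean_0, mean_1):
+     # Eq. (G2): overlap between the two class-feature means
+     return float(np.trace(mean_0 @ mean_1).real)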
+ The geometric properties of the feature states in the measure of M_1^(k) and M_2 are visualized in Fig. G.5. The left panel shows that when L ∈ {2, 3, 4, 5}, both the value of M_1^(1) (highlighted by the green color) and M_1^(2) (highlighted by the pink color) decrease from ∼3.2 (epoch t = 0) to ∼0.5 (epoch t = 40). These results comply with Condition (i) in the sense that the feature states in the same class concentrate around the class-feature mean, which leads to a low empirical risk. By contrast, when L is too small or too large, the value of M_1^(1) changes subtly before and after optimization and stays above 3.2. The large deviation of the feature states degrades the performance of QC. The right panel depicts that when L ∈ {2, 3, 4, 5}, the value of M_2 decreases from 0.5 (epoch t = 0) to 0.05 (epoch t = 40). This reduction means that the class-feature means are maximally separated and thus ensure a good learning performance. On the contrary, when L ∈ {1, 6, 7}, the value of M_2 oscillates around 0.5, which implies that the class-feature means ρ̄^(1) and ρ̄^(2) are highly overlapped.
+ The learning dynamics of QC and MLP. Fig. G.6 visualizes the learning dynamics of QC and MLP with respect to the varied trainable parameters. The left panel indicates that when the layer number is L = 2, 3, 4, both train and test accuracies of QC quickly converge to 100% within 25 epochs. When L = 1, both train and test accuracies oscillate around 50%. When L = 7, the number of train data becomes insufficient and the overfitting phenomenon appears. These results accord with the U-shape risk curve of QCs. The right panel shows that when the number of hidden neurons ranges from h = 1 to h = 18, the test accuracy of MLP is no higher than 55%. These results reflect the incapability of MLP in learning the parity dataset compared with QCs.
+ 2. Simulation results of multi-class classification for the Fashion-MNIST image dataset
+ The feature states before and after training. Here we discuss the geometric properties of feature states when the layer number of the hardware-efficient Ansatz varies from L = 2 to L = 150. The metrics M_1^(k) and M_2 defined in Eqs. (G1) and (G2) are employed.
+ FIG. G.7. Geometric properties of the quantum feature states on the Fashion-MNIST dataset. (a) The averaged performance of QC evaluated by M_1 defined in Eq. (G1). (b) The averaged performance of QC evaluated by M_2 defined in Eq. (G2). All labels have the same meaning as those introduced in Fig. G.5.
+ FIG. G.8. Train (test) accuracy versus epoch on the Fashion-MNIST dataset. (a) Train and test accuracy of QC with a varied layer number. The labels have the same meaning as those presented in Fig. G.6. (b) Train and test accuracy of CNN with a varied number of trainable parameters. The label 'h = a' indicates that the number of output channels at the second layer is a. The solid and dashed lines have the same meaning as those for QC.
+ In the measure of M_2, since the performance of QC for any two classes is similar, we only study the first two classes for ease of visualization.
+ Fig. G.7 depicts the geometric properties of the feature states in the measure of M_1^(k) and M_2. The left panel shows that for all settings with L ∈ {2, 5, 25, 50, 100, 150}, the value of M_1^(k) at the initial step and the final step is very similar, and M_1^(k) is larger than 0.2 for ∀k ∈ {1, 2, ..., 9}. These results indicate that QC cannot satisfy Condition (i) when learning the Fashion-MNIST dataset, where the feature states from the same class cannot collapse to a unique point. Moreover, when we examine the separation between classes, the right panel implies that after training, the class-feature means of QC are still highly overlapping: the distance for all settings of L is above 0.3. The inability to achieve the optimal training loss shows the limited power of QC in learning the Fashion-MNIST dataset.
+ The learning dynamics of QC and CNN. Fig. G.8 depicts the learning dynamics of QC and CNN with a varied number of trainable parameters. The left panel indicates that QC achieves the best performance when the layer number is L ∈ [25, 100], where the corresponding number of parameters ranges from 750 to 3000. In these settings, both train and test accuracies of QC are around 30% after 50 epochs. When L < 25 or L > 100, both train and test accuracies oscillate around 15%. These results accord with the U-shape risk curve of QCs. The right panel shows that the train and test accuracies of CNN grow steadily with the increased number of channels. That is, when the number of channels at the second layer is not less than 6, both the train and test accuracies are higher than 60%. These results indicate that the employed QC does not have potential advantages in learning the image dataset compared with CNN.
ftAzT4oBgHgl3EQfof1Y/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
hdA0T4oBgHgl3EQfIP8h/content/tmp_files/2301.02071v1.pdf.txt ADDED
@@ -0,0 +1,1404 @@
1
+ Published as a conference paper at EMNLP 2022
2
+ Towards Table-to-Text Generation with Pretrained Language Model: A
3
+ Table Structure Understanding and Text Deliberating Approach
4
+ Miao Chen§∗, Xinjiang Lu¶, Tong Xu§, Yanyan Li¶, Jingbo Zhou¶, Dejing Dou¶, Hui Xiong†‡
5
+ ¶BIL, Baidu Research,
6
+ §University of Science and Technology of China
7
+ †Hong Kong University of Science and Technology (Guangzhou)
8
+ ‡Guangzhou HKUST Fok Ying Tung Research Institute
9
+ [email protected], {luxinjiang,liyanyanliyanyan,zhoujingbo}@baidu.com,
10
11
+ Abstract
12
+ Although remarkable progress on the neural
13
+ table-to-text methods has been made, the gen-
14
+ eralization issues hinder the applicability of
15
+ these models due to the limited source tables.
16
+ Large-scale pretrained language models sound
17
+ like a promising solution to tackle such issues.
18
+ However, how to effectively bridge the gap
19
+ between the structured table and the text in-
20
+ put by fully leveraging table information to
21
+ fuel the pretrained model is still not well ex-
22
+ plored. Besides, how to integrate the deliberation mechanism into the text-to-text pretrained model for the table-to-text task remains seldom studied. In this
26
+ paper, to implement the table-to-text genera-
27
+ tion with pretrained language model, we pro-
28
+ pose a table structure understanding and text
29
+ deliberating approach, namely TASD. To be
30
+ specific, we devise a three-layered multi-head
31
+ attention network to realize the table-structure-
32
+ aware text generation model with the help of
33
+ the pretrained language model. Furthermore,
34
+ a multi-pass decoder framework is adopted to
35
+ enhance the capability of polishing generated
36
+ text for table descriptions. The empirical stud-
37
+ ies, as well as human evaluation, on two public
38
+ datasets, validate that our approach can gener-
39
+ ate faithful and fluent descriptive texts for dif-
40
+ ferent types of tables.
41
+ 1
42
+ Introduction
43
+ The task of learning to generate natural language
44
+ descriptions from non-linguistic input, which is
45
+ referred to as data-to-text, is important for many
46
+ applications, such as weather forecast genera-
47
+ tion (Mei et al., 2016), sports news writing (Wise-
48
+ man et al., 2017), biography writing (Lebret et al.,
49
+ 2016), market comments writing (Murakami et al.,
50
+ 2017) and automatic question-answering (Li et al.,
51
+ 2021b). Although the input data for data-to-text can be in various forms, here we focus on the text generation task that takes a table as input.
+ ∗ This work was done when the first author was an intern at Baidu Research under the supervision of the second author.
56
+ Inspired by neural machine translation models,
57
+ previous studies on table-to-text tasks mainly adopt
58
+ traditional seq2seq methods to generate table de-
59
+ scriptions (Lebret et al., 2016; Wiseman et al.,
60
+ 2017; Liu et al., 2018; Gong et al., 2019b; Wang
61
+ et al., 2020; Li et al., 2021a). Despite generating
62
+ text with high fluency, lacking numerous source
63
+ tables leads to lower generalizability of the table-
64
+ to-text model. Recent progress in the pretrained
65
+ language model (Devlin et al., 2019; Radford et al.,
66
+ 2019) shows remarkable performance in solving
67
+ natural language processing tasks. The model pre-
68
+ trained on large-scale data possesses rich knowl-
69
+ edge, which inspires us with the potential for solv-
70
+ ing generalization issues of the text generation task.
71
+ To exploit the expressive power of the pretrained
72
+ model for the table-to-text task, it is necessary to
73
+ serialize the input table effectively. Several works
74
+ have put efforts to bridge this gap, such as serial-
75
+ izing the table into a token sequence (Zhang et al.,
76
+ 2020; Suadaa et al., 2021; Xing and Wan, 2021),
77
+ or introducing an extra task to control the table
78
+ representation (Gong et al., 2020). However, none
79
+ of these leveraged the table structure information
80
+ effectively. Furthermore, the text-to-text pretrained
81
+ model decodes and generates a sequence in a one-
82
+ pass forward process, which means it cannot per-
83
+ ceive the future words in advance on the target side.
84
+ Recently, the deliberation mechanism (Niehues
85
+ et al., 2016; Geng et al., 2018) implemented by
86
+ the multi-pass decoder is proposed to tackle this
87
+ problem. However, how to adapt this approach for
88
+ text-to-text pretraining, which can be further ap-
89
+ plied to the table-to-text task, is another challenge.
90
+ To this end, we propose a table structure under-
91
+ standing and text deliberating approach, namely
92
+ TASD, to solve the table-to-text task with the pre-
93
+ trained language model enhanced by the deliber-
94
+ ation mechanism. Specifically, we first serialize
95
+ the table input with customized templates which
100
+ do not require the target cells to be labeled. Then,
101
+ we employ the multi-head attention in a hierarchi-
102
+ cal way to learn the table representation that is
103
+ aware of table structure and apply it to guide the
104
+ fine-tuning of the text-to-text pretrained model. Af-
105
+ terward, we adopt the multi-pass decoder to realize
106
+ text deliberation. More specifically, we treat the
107
+ above table-structure-aware fine-tuned model as
108
+ the first-pass decoder and adopt another pretrained
109
+ model as the second-pass decoder to further polish
110
+ the descriptive text. In the second-pass decoding
111
+ phase, the table representation can be conveniently
112
+ leveraged as the “original text” in the text deliber-
113
+ ation mechanism. The main contributions of this
114
+ work can be summarized as follows:
115
+ • We propose a novel table-to-text generation
116
+ approach (i.e., TASD) to assimilating the com-
117
+ plete table information with the help of table
118
+ structure distillation, the pretrained language
119
+ model, and the text deliberation.
120
+ • We devise a table-structure-aware text gen-
121
+ eration model (TASATG) via the hierarchi-
122
+ cal multi-head attention network, which can
123
+ realize the content selection automatically.
124
+ And we develop an effective text deliberation
125
+ method dedicated to the table-to-text task.
126
+ • Extensive experiments conducted on two dif-
127
+ ferent datasets demonstrate that TASD out-
128
+ performs comparable baselines in terms of
129
+ various metrics.
130
+ 2
131
+ Related Work
132
+ 2.1
133
+ Table-to-Text Generation
134
+ Encouraged by the success of seq2seq methods
135
+ in machine translation and text summarization, re-
136
+ searchers proposed to formulate the input table as a
137
+ sequence of records (Lebret et al., 2016; Wiseman
138
+ et al., 2017), and further improve the performance
139
+ of table-to-text methods based on seq2seq by mod-
140
+ eling table representation (Liu et al., 2018; Gong
141
+ et al., 2019a). Introducing auxiliary tasks to enrich
142
+ the table representation (Tian et al., 2019; Li et al.,
143
+ 2021a) is another promising paradigm to address
144
+ the table-to-text problem. Moreover, there have
145
+ been studies focusing on how to disaggregate the
146
+ table-to-text pipeline effectively to generate more
147
+ faithful and fluent text, e.g. leveraging content
148
+ selection and planning (Puduppully et al., 2019;
149
+ Trisedya et al., 2020; Bai et al., 2021), combin-
150
+ ing autoregressive and non-autoregressive meth-
151
+ ods (Wang et al., 2021). In addition, recent Trans-
152
+ formers were also applied to solve the table-to-text
153
+ task (Gong et al., 2019b; Wang et al., 2020; Obeid
154
+ and Hoque, 2020). However, current table-to-text
155
+ methods may fail to tackle the overfitting problem
156
+ aroused by the lack of diversity in small datasets.
157
+ Fine-tuning the model pretrained in a large cor-
158
+ pus and adapting to a specific task is an effective
159
+ approach to tackling the generation issues disturbed
160
+ by small data and large parameters (Radford et al.,
161
+ 2019). (Kale and Rastogi, 2020) explored the feasi-
162
+ bility of applying the text-to-text pretrained model
163
+ to the table-to-text task, (Gong et al., 2020) applied
164
+ multi-task learning to solve the table-to-text task
165
+ with pretrained language model, and (Suadaa et al.,
166
+ 2021) leveraged pretrained language model for fact
167
+ inference in numerical table contents. However,
168
+ these approaches seldom perceived and integrated
169
+ the complete table information into the fine-tuning
170
+ of the pretrained model. A table-to-text pretrained
171
+ model (Xing and Wan, 2021) was proposed; however, a large and diversified table corpus is often un-
173
+ available. In addition, recent works on fact verifica-
174
+ tion taking tabular as input (Yin et al., 2020; Dong
175
+ and Smith, 2021) have suggested the effectiveness
176
+ of the table-structure-aware pretrained model.
177
+ 2.2
178
+ Text Deliberation
179
+ The encoder-decoder framework has been widely
180
+ applied to neural machine translation, while the
181
+ subsequent words are often invisible on the target
182
+ side when decoding a sequence. To alleviate this, re-
183
+ searchers proposed to decode and refine the output
184
+ sequence in multiple passes, like human cognitive
185
+ behavior when polishing an article. Studies have
186
+ been made on text deliberation, such as the solu-
187
+ tion with two separate stages (i.e., generating and
188
+ polishing) (Niehues et al., 2016), combining two
189
+ separate stages as one framework (Xia et al., 2017),
190
+ and deliberating generated text in multiple passes
191
+ adaptively via reinforcement learning (Geng et al.,
192
+ 2018) or customized evaluating architecture (Li
193
+ and Yao, 2021). To the best of our knowledge, we
194
+ are the first to apply the deliberation mechanism to
195
+ the table-to-text problem.
196
+ 3
197
+ Preliminaries
198
+ 3.1
199
+ Problem Formulation
200
+ Our table-to-text problem takes a table as input,
201
+ and we formulate a table as a sequence of records:
202
+ T = {τ_{1,1}, τ_{1,2}, · · · , τ_{i,j}, · · · , τ_{m,n}}, where m and n denote the number of rows and columns of T, respectively. Then, we aim to generate a document Y containing words Y = y_1 y_2 · · · y_l that can describe the content of T precisely, where l is the document length. Formally, given a table T, the table-to-text model is expected to generate a descriptive document Y in an auto-regressive way:
+ y_i = arg max P(y_i | T, y_1 y_2 · · · y_{i−1}; Θ), i = 1, · · · , l,
+ where Θ is the set of model parameters.
+ Figure 1: The framework overview of TASD.
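+ A minimal sketch of the auto-regressive factorization above, with a HuggingFace-style causal LM assumed, follows; greedy argmax decoding is shown for clarity, while the experiments later use beam search.
+ import torch
+
+ @torch.no_grad()
+ def greedy_decode(model, table_ids, max_len, eos_id):
+     # y_i = argmax P(y_i | T, y_1...y_{i-1}; Θ), iterated until EOS
+     ys = table_ids                       # serialized table tokens as prefix
+     for _ in range(max_len):
+         next_id = model(ys).logits[0, -1].argmax().item()
+         if next_id == eos_id:
+             break
+         ys = torch.cat([ys, torch.tensor([[next_id]])], dim=1)
+     return ys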
216
+ 3.2
217
+ Data
218
+ NumericNLG Dataset. The numericNLG dataset
219
+ was released by (Suadaa et al., 2021).
220
+ In this
221
+ dataset, the tables demonstrate experimental re-
222
+ sults in research papers, thus, most of the table
223
+ contents are numerical values. We use this dataset
224
+ to evaluate the accuracy and smoothness of the
225
+ generated descriptions for the table with numerical
226
+ content. In particular, for each table of numer-
227
+ icNLG, <table_id> acts as the pronoun of the
228
+ table, and <caption> is the descriptive text of the
229
+ table. Moreover, for each cell of a table, there are
230
+ <metric>, (row and column) <header>, and
231
+ <value> as different views of a cell.
232
+ Totto Dataset. The Totto dataset (Parikh et al.,
233
+ 2020) is an open-domain table-to-text dataset
234
+ collected from Wikipedia. The table contents
235
+ are mainly in text form. The metadata of the Totto dataset includes <page_title>, <section_title>, and <section_text>. In
242
+ detail, each cell of a table has corresponding
243
+ <header> and <value>. Unlike numericNLG,
244
+ textual content in our Totto dataset accounts for
245
+ 62.4%, which can evaluate the text generation
246
+ effectiveness for the tables with textual records.
247
+ 4
248
+ Methodology
249
+ In this section, we introduce the proposed frame-
250
+ work in detail. As shown in Fig. 1, our framework
251
+ mainly consists of three components, i.e., template-
252
+ based table serialization, table-structure-aware
253
+ fine-tuning, and text deliberation. Specifically, we
254
+ first produce a sequence describing the table con-
255
+ tents with customized templates. The templates we
256
+ adopted do not require the target cells to be labeled.
257
+ Then, to generate informative text, we adopt full ta-
258
+ ble representation learning to guide the description
259
+ generation, such that the outcome text is capable
260
+ of emphasizing and delineating the facts in the ta-
261
+ ble from a macroscopic perspective. Finally, we
262
+ employ and adapt the multi-pass decoder to our
263
+ data-to-text problem, which can further fine-tune
264
+ the generated table description. Technical details
265
+ for all three modules will be introduced separately
266
+ in the following subsections.
267
+ 4.1
268
+ Template-based Table Serialization
269
+ To well harness the expressive power of the text-to-
270
+ text pretrained model for the input table, it is nec-
271
+ essary to serialize the raw table first. The template-
272
+ based representation offers us a simple yet effective
273
+ linearization approach to generating descriptive
274
+ texts which can reflect the facts in a table without
275
+ yielding an intractable downstream model.
276
+ In particular, the templates we adopted in this
277
+ work are devised to mention all the available facts
278
+ in the table without knowing the emphasized cells
279
+ in advance, which is different from (Suadaa et al.,
280
+ 2021). The template for describing facts consists
281
+ of two parts:
282
+ 1. The title or descriptive text that comes with
283
+ the table.
284
+ 2. A series of expressions, in which each one
285
+ describes the content of a cell.
286
+ More specifically, for the numericNLG dataset,
287
+ we apply the following template:
288
+ <table_id> shows <caption>. <metric1,1> of <header1,1> is <value1,1>, · · · , <metrici,j> of <headeri,j> is <valuei,j>, · · · .
+ For the Totto dataset, we apply another template:
+ As <page_title> <section_title>, <section_text>. <header1,1> is <value1,1>, · · · , <headeri,j> is <valuei,j>, · · · .
295
+ The second part of the template enumerates all the
296
+ cells in the table. This preliminary table represen-
297
+ tation, denoted by TS , covers all the available facts
298
+ in a raw table. Note that, the templates we adopt
299
+ may encounter the content selection problem. In
300
+ table-to-text applications, target cells in the input
301
+ table are often not highlighted and the generated
302
+ table description should emphasize certain cells.
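+ For illustration, the numericNLG template can be rendered with a small helper like the following (a hypothetical sketch, not the released code):
+ def serialize_numericnlg(table_id, caption, cells):
+     # cells: iterable of (metric, header, value) triples, one per table cell
+     parts = [f"{table_id} shows {caption}."]
+     parts += [f"{m} of {h} is {v}." for m, h, v in cells]
+     return " ".join(parts)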
303
+ Figure 2: The architecture of table-structure-aware text
320
+ generation model (i.e., TASATG).
321
+ 4.2
322
+ Table-Structure-Aware Text Generation
323
+ A text-to-text pretrained model can take the large-
324
+ scale corpus as input to possess vast knowledge
325
+ and generate texts in an unsupervised way so that
326
+ it has been widely applied to text-generation tasks.
327
+ When handling a specific text generation task, it is
328
+ effective to fine-tune the pretrained model on new
329
+ data. However, for the table-to-text task, some hid-
330
+ den information, like table structure, is most likely
331
+ to be overlooked, though the drafted TS mentions
332
+ all the available facts in the table. Thus, we pro-
333
+ pose to exploit table structure information to guide
334
+ fine-tuning of the text-to-text pretrained model.
335
+ As shown in Fig. 2, we first encode the table
336
+ content in a multi-view fashion. To be specific,
337
+ given a cell τi,j in a table T, it can be viewed from
338
+ different perspectives, such as the value of τi,j, the
339
+ row header of τi,j, and the column header of τi,j,
340
+ etc. Then, we treat the k-th view of τi,j as a to-
341
+ ken sequence which is denoted by x(k)
342
+ i,j . Afterward,
343
+ we pad x(k)
344
+ i,j with placeholders (if necessary) and
345
+ concatenate these token sequences as follows:
346
+ x_{i,j} = x_{i,j}^(1) ⊛ x_{i,j}^(2) ⊛ · · · ,   (1)
350
+ where ⊛ denotes the concatenation operator, and
351
+ the multi-viewed representation of a table T is de-
352
+ noted as X = [x1,1, · · · , xi,j, · · · , xm,n]. Each to-
353
+ ken of x(k)
354
+ i,j can be encoded as a d-dimensional em-
355
+ bedding by looking up the text-to-text pretrained
356
+ model and updated accordingly when fine-tuning
357
+ the pretrained model. In this way, we can obtain
358
+ the semantic representation of table T, which is
359
+ denoted by E(0) ∈ Rm×n×s×d, where s is the length
360
+ of concatenated sequence xi,j.
361
+ To realize TASATG for table-to-text, we pro-
362
+ pose to employ multi-head attention (Vaswani et al.,
363
+ 2017) to guide fine-tuning of the text-to-text pre-
364
+ trained model. In particular, we adopt three multi-
365
+ head attention (MHA) layers to interactively extract
366
+ the information in the table in a hierarchical way.
367
+ Specifically, the MHA layer is defined as:
368
+ Q_i = Q W_i^Q, K_i = K W_i^K, V_i = V W_i^V,
+ head_i = Attention(Q_i, K_i, V_i) = softmax(Q_i K_i^⊤ / √d) V_i,
+ MHA(Q, K, V) = [head_1, · · · , head_h] W^O,
380
+ where Q, K, V represent the query, key and value
381
+ in the attention mechanism, respectively.
382
+ As illustrated in Fig. 2, in the first MHA layer,
383
+ we add a cell text position embedding (E(ctpe) ∈
384
+ Rs×d) to each cell of the aforementioned E(0), and
385
+ feed it to the multi-head attention to implement cell
386
+ text self-attention,
387
+ Ẽ(0) = E(0) ⊕ E(ctpe),
+ E(1) = MHA(Ẽ(0), Ẽ(0), Ẽ(0)),
+ E(1) = (1/s) Σ_{i=1}^{s} E(1)[:, :, i, :],   (2)
400
+ where ⊕ denotes the element-wise addition opera-
401
+ tion. Consequently, E(1) ∈ Rm×n×d can be deemed
402
+ as an initial aggregated table representation. Next,
403
+ in the second MHA layer, we add a table position
404
+ embedding (E(tpe) ∈ Rm×n×d) to E(1) to implement
405
+ table structure self-attention,
406
+ Ẽ(1) = E(1) ⊕ E(tpe),
+ E(2) = MHA(Ẽ(1), Ẽ(1), Ẽ(1)).   (3)
413
+ E(2) ∈ Rm×n×d is the table-structure-aware represen-
414
+ tation. Moreover, in the third MHA layer, we ap-
415
+ ply a multi-head cross-attention to take the hidden
416
+ state of the text-to-text pretrained model (denoted
417
+ by H ∈ Rs×d) as the attention query, such that we
418
+ can focus on the important cells of the table,
419
+ H̃ = MHA(H, E(2), E(2)) ⊕ H.   (4)
+ This new hidden state H̃ guided by the table repre-
422
+ sentation will replace the original hidden state H
423
+ in the text-to-text pretrained model to generate the
424
+ probability of the next word.
425
+ Note that, the cross attention weights on differ-
426
+ ent table cells based on the previous words can
427
+ realize the content selection automatically. In ad-
428
+ dition, we implement the text-to-text pretrained
429
+ model with GPT2 (Radford et al., 2019), which
430
+ adopts a decoder-only Transformer architecture.
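+ The three-layer attention stack of Eqs. (2)-(4) can be sketched with torch.nn.MultiheadAttention as below; the batching and reshaping details are our assumptions.
+ import torch.nn as nn
+
+ class TableStructureAttention(nn.Module):
+     def __init__(self, d, heads=8):
+         super().__init__()
+         self.cell_attn = nn.MultiheadAttention(d, heads, batch_first=True)
+         self.table_attn = nn.MultiheadAttention(d, heads, batch_first=True)
+         self.cross_attn = nn.MultiheadAttention(d, heads, batch_first=True)
+
+     def forward(self, e0, ctpe, tpe, h):
+         # e0: (m, n, s, d) cell-token embeddings; h: (1, s_t, d) GPT2 hidden states
+         m, n, s, d = e0.shape
+         e = (e0 + ctpe).reshape(m * n, s, d)
+         e1, _ = self.cell_attn(e, e, e)            # Eq. (2): cell text self-attention
+         e1 = e1.mean(dim=1).reshape(1, m * n, d)   # average over token positions
+         e1 = e1 + tpe.reshape(1, m * n, d)
+         e2, _ = self.table_attn(e1, e1, e1)        # Eq. (3): table structure self-attention
+         out, _ = self.cross_attn(h, e2, e2)        # Eq. (4): H queries the table cells
+         return out + h                             # residual; replaces H in GPT2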
431
+ (a) Training.
442
+ (b) First and second fine-tuning of TASATG with vali-
443
+ dation data.
444
+ (c) Testing.
445
+ Figure 3: Training, validation and testing procedures of the proposed TASD approach.
446
+ 4.3
447
+ Text Deliberation
448
+ The encoder-decoder framework applied in many
449
+ sequence generation tasks often adopts a one-pass
450
+ process while decoding a sequence. Though effi-
451
+ cient, the one-pass decoder cannot perceive future
452
+ context for further text deliberation. Multi-pass de-
453
+ coder extends the capability of generating more
454
+ refined text by exploring global information in the
455
+ sequence (Niehues et al., 2016; Xia et al., 2017).
456
+ For the text-to-text pretrained model, due to the
457
+ huge amount of parameters of the pretrained lan-
458
+ guage model, it is unwise to directly combine the
459
+ models in different passes. A common solution is
460
+ to concatenate the original serialized table content
461
+ and the text generated in the previous pass to fine-
462
+ tune the pretrained model in the next-pass decoding.
463
+ However, in this way, the length of input text prob-
464
+ ably exceeds the limit of the text-to-text pretrained
465
+ model, and the time complexity is too high.
466
+ To effectively implement the fine-tuning of the
467
+ text-to-text pretrained model in multiple passes,
468
+ as shown in Figs. 3a and 3b, we take the table
469
+ representation as the “original text” and feed the
470
+ text generated in the first-pass fine-tuning plus
471
+ the table representation to the second-pass fine-
472
+ tuning. Note that, as shown in Fig. 3a, we sep-
473
+ arately fine-tune the table-to-text generation task
474
+ and the text-to-text deliberation task with two inde-
475
+ pendent TASATG models, and each of them takes
476
+ a text-to-text pretrained model as the backbone.
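+ Schematically, the two-pass procedure reduces to the following (hypothetical decoder interfaces):
+ def two_pass_generate(first_decoder, second_decoder, table_repr, table_text):
+     # The table representation plays the role of the "original text", so the
+     # second pass never has to re-read the long serialized template.
+     draft = first_decoder.generate(table_text)                 # first-pass draft
+     return second_decoder.generate(draft, context=table_repr)  # polish the draft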
477
+ 5
478
+ Experiments
479
+ 5.1
480
+ Experimental Settings
481
+ Data. We conducted experiments on the aforemen-
482
+ tioned datasets, i.e., numericNLG and Totto. The
483
+ statistics of the numericNLG dataset can be found
484
+ in (Suadaa et al., 2021). Besides, the size of the
485
+ original Totto dataset is 120K, which is much larger
486
+ than the numericNLG dataset. To evaluate differ-
487
+ ent methods for table-to-text with comparable data
488
+ size, for the Totto dataset, we filtered out the tables
489
+ with fewer rows and columns, i.e., #rows < 8 and
490
+ #columns < 8, such that the filtered Totto dataset
491
+ contains 1.8K tables. Then, we randomly selected
492
+ 1.2K¹ tables to generate the new Totto dataset.
493
+ Evaluation Metrics. We calculated BLEU (from
494
+ gram-1 to gram-4) (Papineni et al., 2002), ROUGE-
495
+ L (Lin, 2004) and METEOR (Denkowski and
496
+ Lavie, 2014) to evaluate the quality of the gen-
497
+ erated text. The BLEU-n with a small value of n
498
+ measures the accuracy of the word level, and the
499
+ BLEU-n with a large n can measure the fluency
500
+ of the sentence. The ROUGE-L measures the re-
501
+ call rate based on the longest common sequence
502
+ between source and target texts. The METEOR is
503
+ based on the harmonic mean of unigram precision
504
+ and recall, with recall weighted higher than preci-
505
+ sion. These metrics are widely used to measure the
506
+ accuracy and fluency of the generated sentence.
507
+ Baselines. We compare TASD with the following
508
+ baselines.
509
+ • Template-based Table Serialization. We use the template designed for table serial-
512
+ ization as a baseline. Note that, the token
513
+ sequence generated by the template-based
514
+ method is denoted as TS .
515
+ • Pointer Generator (See et al., 2017). This
516
+ is a seq2seq model with the attention and
517
+ copy mechanism. We take TS as input for
518
+ the pointer generator model.
519
+ • TRM. We implemented a simplified version
520
+ of the proposed TASD that omits the pos-
521
+ sessed knowledge in the pretrained language
522
+ model and removes text deliberation for focus-
523
+ ing on table representation modeling, namely
524
+ TRM. In particular, TRM adopts the architec-
525
+ ture of GPT2 but initializes the parameters
526
+ randomly and trains 100 epochs at most for
527
+ fine-tuning. Besides, TRM takes TS plus the
528
+ table structure representation as input and is
529
+ fed with TS in the inference phase.
530
+ ¹ The size of the numericNLG dataset is 1.3K.
531
+ Table 1: Performance comparisons of the automatic evaluation on the numericNLG dataset.
+ Method | BLEU-1 | BLEU-2 | BLEU-3 | BLEU-4 | METEOR | ROUGE-L
+ Template-based Method | 10.28 | 5.52 | 2.83 | 1.14 | 11.31 | 11.49
+ Pointer Generator | 5.10±0.59 | 2.71±0.19 | 1.16±0.17 | 0.56±0.04 | 7.82±0.15 | 15.21±0.14
+ TRM | 14.16±0.97 | 6.05±0.50 | 2.11±0.13 | 0.80±0.12 | 9.72±0.94 | 12.72±0.80
+ Fine-tuned GPT2 | 16.13±0.56 | 9.02±0.31 | 4.68±0.22 | 2.20±0.22 | 10.14±0.32 | 17.48±0.36
+ TableGPT | 18.69±0.39 | 8.21±0.24 | 3.31±0.19 | 1.51±0.14 | 11.06±0.18 | 16.90±0.27
+ TASD w/o TAS | 18.20±2.40 | 9.74±1.01 | 4.38±0.31 | 1.98±0.39 | 10.64±0.86 | 19.29±1.77
+ TASD w/o D | 18.02±0.50 | 10.06±0.25 | 5.20±0.13 | 2.47±0.20 | 10.99±0.29 | 18.57±0.27
+ TASD w/o 1st-TAS | 20.07±1.94 | 10.35±0.69 | 4.67±0.35 | 2.05±0.34 | 11.52±0.80 | 20.10±0.62
+ TASD | 21.81±1.13 | 11.03±0.11 | 4.92±0.22 | 2.15±0.39 | 11.87±0.40 | 20.40±0.80
618
+ Table 2: Performance comparisons of the automatic evaluation on the Totto dataset.
+ Method | BLEU-1 | BLEU-2 | BLEU-3 | BLEU-4 | METEOR | ROUGE-L
+ Template-based Method | 0.84 | 0.43 | 0.23 | 0.09 | 4.59 | 1.51
+ Pointer Generator | 11.34±1.57 | 2.05±0.83 | 0.45±0.27 | 0.35±0.13 | 5.38±0.78 | 14.46±1.46
+ TRM | 10.21±1.79 | 3.44±0.88 | 1.21±0.48 | 0.54±0.25 | 9.30±1.16 | 11.52±2.03
+ Fine-tuned GPT2 | 9.53±0.51 | 3.65±0.34 | 1.18±0.37 | 0.40±0.26 | 9.89±0.39 | 10.69±0.27
+ TableGPT | 6.80±0.26 | 3.51±0.22 | 1.33±0.21 | 0.76±0.12 | 11.10±0.42 | 11.73±0.44
+ TASD w/o TAS | 13.70±0.90 | 4.44±0.69 | 1.28±0.47 | 0.65±0.35 | 10.79±0.83 | 14.47±1.11
+ TASD w/o D | 10.03±0.39 | 4.42±0.29 | 1.64±0.36 | 0.71±0.38 | 10.29±0.49 | 10.67±0.34
+ TASD w/o 1st-TAS | 13.90±0.60 | 5.07±0.61 | 1.68±0.52 | 0.79±0.25 | 10.98±0.40 | 14.88±0.71
+ TASD | 14.19±1.08 | 5.17±0.38 | 1.71±0.32 | 0.78±0.21 | 11.65±0.71 | 14.96±1.10
688
+ • Fine-tuned GPT2 (Radford et al., 2019). We
689
+ take the concatenation of TS and Y as the in-
690
+ put for fine-tuning. In the inference phase,
691
+ we only feed TS to the model to generate Y
692
+ starting after the last token of TS .
693
+ • TableGPT (Gong et al., 2020). TableGPT is
694
+ a state-of-the-art table-to-text method. To im-
695
+ prove the text fidelity and exploit the struc-
696
+ tural information at the same time, TableGPT
697
+ employs a multi-task learning paradigm con-
698
+ sisting of two auxiliary tasks, that is, one task
699
+ reconstructs the table structure from represen-
700
+ tations of GPT2, and the other aligns the tables
701
+ and the information in the generated text.
702
+ Implementation Details. The split settings for training, validation, and testing were 1084:136:135² for the numericNLG dataset and 960:120:120 for the Totto dataset, respectively. Regarding automatic evaluation, all results of deep models were obtained by conducting experiments on a Linux machine with an Nvidia A100 GPU, and the averaged results of 5 runs were reported. Besides, an Adam optimizer was utilized (with an initial learning rate of 3e-5) for GPT2 fine-tuning, and the training was iterated for 20 epochs at most. A beam search algorithm was adopted when decoding a sequence, and the beam width was set to 5.³
+ ² This setting follows the experiments of (Suadaa et al., 2021).
+ ³ Our implementation is available at https://github.com/ramber1836/TASD.
717
+ 5.2
718
+ Automatic Evaluation
719
+ The comparisons of automatic evaluation results
720
+ between TASD and other baselines can be found
721
+ in Tables 1 and 2. In general, TASD outperforms
722
+ the baselines for all the metrics on two datasets. In
723
+ particular, compared to the reported best result of
724
+ all the baselines, TASD achieves improvements of
725
+ 3.12 for BLEU-1 (18.69 → 21.81), 2.01 for BLEU-
726
+ 2 (9.02 → 11.03), 0.24 for BLEU-3 (4.68 → 4.92),
727
+ 0.56 for METEOR (11.31 → 11.87), and 2.92 for
728
+ ROUGE-L (17.48 → 20.40) on the numericNLG
729
+ dataset, and 2.85 for BLEU-1 (11.34 → 14.19),
730
+ 1.52 for BLEU-2 (3.65 → 5.17), 0.38 for BLEU-3
731
+ (1.33 → 1.71), 0.02 for BLEU-4 (0.76 → 0.78),
732
+ 0.55 for METEOR (11.10 → 11.65), and 0.50 for
733
+ ROUGE-L (14.46 → 14.96) on the Totto dataset.
734
+ In other words, for different types of source tables,
740
+ TASD generates better descriptive texts w.r.t. accu-
741
+ racy at the word level, recall of the sequence, and
742
+ fluency of sentences.
743
+ Besides, we have the following observations: 1)
744
+ The template-based method performs much bet-
745
+ ter on the numericNLG dataset compared to the
746
+ Totto dataset, since the referenced table descrip-
747
+ tions in numericNLG were collected from scientific
748
+ papers, however, the table summaries in the Totto
749
+ dataset are more diverse. 2) In the Totto dataset,
750
+ the pointer generator model tends to cover more
751
+ words in descriptive text and generate more fluent
752
+ sentences than the template-based method, as the
753
+ contents in source tables of the Totto dataset are
754
+ mostly linguistic. This can also explain why the
755
+ pointer generator performs worse than the template-
756
+ based method on the numericNLG dataset w.r.t.
757
+ BLEU and METEOR. 3) Fine-tuned GPT2 can
758
+ generate more faithful and fluent text than other
759
+ baselines (refer to Tables 1 and 2) most of the time,
760
+ which validates the effectiveness of the pretrained
761
+ language model. 4) In general, TableGPT performs
762
+ better, and even the best, among all the baselines.
763
+ In the numericNLG dataset, the headers of the
764
+ input tables (a.k.a. the attributes of records for
765
+ TableGPT) are more diverse, which may explain
766
+ why the performance of TableGPT is not promising
767
+ as expected on the numericNLG dataset. 5) TRM
768
+ can generate comparable, or even better descriptive
769
+ text as fine-tuned GPT2, which further suggests
770
+ the effectiveness of table structure understanding.
771
+ 5.3
772
+ Ablation Analysis
773
+ Moreover, to verify the effectiveness of different
774
+ modules, we compare TASD with its variants.
775
+ • TASD w/o TAS. After generating text with fine-tuned GPT2, we fed the generated text concatenated with TS to another fine-tuned GPT2 to realize the second-pass decoder without table structure representation.
+ • TASD w/o D. We implemented TASD without deliberating on the outcome text, which means that we realized TASATG based on GPT2 in a one-pass forward process.
784
+ • TASD w/o 1st-TAS. We removed table struc-
785
+ ture modeling in the first-pass decoding from
786
+ TASD, which was implemented by taking the
787
+ fine-tuned GPT2 as the first-pass decoder and
788
+ the table-structure-aware fine-tuned GPT2 as
789
+ the second-pass decoder.
790
+ As can be seen in Tables 1 and 2, TASD w/o TAS
791
+ performs worse than TASD under all metrics, since
792
+ the table structure modeling can benefit the fine-
793
+ tuning of GPT2. This can also be validated by com-
794
+ paring fine-tuned GPT2 to TASD w/o D. Besides,
795
+ the effectiveness of deliberating text can be proven
796
+ by comparing TASD w/o D to TASD (this can also
797
+ be validated by comparing fine-tuned GPT2 to
798
+ TASD w/o TAS). While text deliberation may harm
799
+ sentence fluency as depicted by the results of these
800
+ methods w.r.t. BLEU-3 & 4 in Table 1. In addition,
801
+ TASD w/o 1st-TAS outperforms TASD w/o TAS under
802
+ all metrics suggesting that taking the table repre-
803
+ sentation as the “original text” in the deliberation
804
+ mechanism is also effective.
805
+ 5.4
806
+ Qualitative Analysis
807
+ Figs. 4(a) and (b) show two selected source ta-
808
+ bles and corresponding descriptive texts (i.e., cap-
809
+ tion and section_text) in numericNLG and Totto
810
+ datasets. Fig. 4(c) demonstrates the generated de-
811
+ scriptions by different methods. The text that cor-
812
+ rectly reflects the facts of the source table is in
813
+ green, the erroneous text is in red, and the con-
814
+ fusing text is in blue. We can see that, there are
815
+ many grammatical errors in the text produced by
816
+ the pointer generator. Fine-tuned GPT2 tends to
817
+ repeat phrases and sentences due to the limited
818
+ knowledge about the input table, which can also
819
+ explain why the fine-tuned GPT2 can obtain a false
820
+ high score in BLEU-n as n grows. Thanks to the
821
+ semantic knowledge brought by pretraining, fine-
822
+ tuned GPT2 can generate more natural descriptions,
823
+ in which, however, perplexing factual errors ex-
824
+ ist. Compared to fine-tuned GPT2, the description
825
+ generated by TASD is more relevant to the table
826
+ contents. Since the target cells are not known in
827
+ advance, the generated text may miss the empha-
828
+ sized points described in the reference. The text
829
+ generated by TableGPT is also fluent, though coun-
830
+ terfactual descriptions may exist.
831
+ 5.5
832
+ Human Evaluation
833
+ We randomly selected 30 samples from the test set
834
+ in numericNLG and Totto datasets, respectively,
835
+ and invited 10 volunteers to evaluate the quality of
836
+ the outcome text by considering three criteria, i.e.,
837
+ grammar, coherence & concise, and factual per-
838
+ spective (correct and relevant). Each criterion has
839
+ scores of five degrees, ranging from 1 (the worst) to
840
+ 5 (the best). The averaged scores were reported in
841
+ Table 3, which show that TASD can generate more
842
+ Figure 4: Two examples of the generated table descriptions.
846
+ Table 3: Result of Human Evaluation
+ Dataset | Method | Grammar | Coherence & Concise | Factual perspective
+ numericNLG | Pointer Generator | 3.16±0.99 | 2.73±1.20 | 1.54±0.69
+ numericNLG | Fine-tuned GPT2 | 3.42±0.56 | 3.11±0.58 | 2.51±0.45
+ numericNLG | TASD w/o D | 3.72±0.61 | 3.48±0.55 | 2.82±0.45
+ numericNLG | TASD | 4.17±0.72 | 3.98±0.64 | 3.15±0.73
+ Totto | Pointer Generator | 2.03±0.71 | 1.89±0.82 | 1.56±0.55
+ Totto | Fine-tuned GPT2 | 2.60±0.55 | 2.36±0.64 | 1.85±0.46
+ Totto | TASD w/o D | 2.63±0.52 | 2.46±0.60 | 1.89±0.46
+ Totto | TASD | 3.4±0.66 | 3.18±0.70 | 2.25±0.69
891
+ readable and coherent texts, and describe more
892
+ correct facts. Moreover, the pretrained models con-
893
+ sistently achieve better scores than the pointer gen-
894
+ erator on grammar and coherence because of the
895
+ expressive power learned from the large-scale cor-
896
+ pus. In the Totto dataset, the improvement of the
897
+ table structure modeling is smaller than that of the
898
+ polishing mechanism, which is consistent with the
899
+ automatic evaluation results in Table 2.
900
+ 6 Discussion
+ In our work, we devised a two-pass decoder framework dedicated to the table-to-text task with the help of the table-structure-aware text generation model (i.e., TASATG). However, the effectiveness of the text deliberation for the table-to-text task should be further explored and integrated into the table-structure-aware modeling in a more harmonious manner. To discuss the limitation of the text deliberation of TASD, we additionally developed a table content reconstruction loss and integrated it into TASD in a multi-task learning fashion.
+ Figure 5: Table reconstruction for table-structure-aware modeling enhancement.
916
+ Specifically, given the table-structure-aware em-
917
+ bedding E(2) generated with Eq. (3), we randomly
918
+ mask certain cells of the input table and yield a
919
+ partially corrupted embedding of the input table, denoted by Ẽ(2). Then, a two-layer MLP (i.e., multi-
922
+ layer perceptron) is adopted to restore the table-
923
+ structure-aware embedding. Afterward, an MSE
924
+ (i.e., mean square error) loss is adopted to mea-
925
+ sure the effectiveness of table reconstruction and
926
+ further integrated into the TASD framework in the
927
+ multi-task learning paradigm. The process of table
928
+ reconstruction is demonstrated in Fig. 5.
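+ A sketch of this auxiliary objective is given below; the mask ratio and the use of zero-masking are assumptions, since the text only specifies random cell masking, a two-layer MLP, and an MSE loss.
+ import torch
+ import torch.nn as nn
+
+ class TableReconstruction(nn.Module):
+     def __init__(self, d, mask_ratio=0.15):
+         super().__init__()
+         self.restore = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))
+         self.mask_ratio = mask_ratio
+
+     def forward(self, e2):                       # e2: (m, n, d) from Eq. (3)
+         mask = (torch.rand(*e2.shape[:2], 1) < self.mask_ratio).float()
+         corrupted = e2 * (1.0 - mask)            # randomly masked cells are zeroed
+         return nn.functional.mse_loss(self.restore(corrupted), e2)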
929
+ We carried out a series of experiments to evalu-
930
+ ate the performance of TASD w/ and w/o the help
931
+ of table reconstruction loss (i.e., TRLoss) on nu-
932
+ mericNLG and Totto datasets in terms of BLEU-n
933
+ (1 to 4), METEOR, and ROUGE-L. The results can
934
+ be found in Tables 4 and 5.
935
+ Table 4: The performances of TASD w/ and w/o the table reconstruction on the numericNLG dataset.
+ Method | BLEU-1 | BLEU-2 | BLEU-3 | BLEU-4 | METEOR | ROUGE-L
+ TASD w/o D | 18.02±0.50 | 10.06±0.25 | 5.20±0.13 | 2.47±0.20 | 10.99±0.29 | 18.57±0.27
+ TASD w/o D w/ TRLoss | 20.56±0.25 | 11.57±0.21 | 5.90±0.23 | 2.98±0.17 | 12.00±0.48 | 20.50±0.39
+ TASD w/ TRLoss | 19.29±0.38 | 10.12±0.24 | 5.32±0.25 | 2.62±0.22 | 12.18±0.90 | 18.95±0.69
+ TASD w/ TRLoss in 1st pass | 18.23±0.68 | 9.39±0.52 | 4.64±0.26 | 2.36±0.24 | 11.51±0.78 | 18.13±0.45
+ TASD w/ TRLoss in 2nd pass | 19.38±2.21 | 10.33±1.34 | 5.11±0.73 | 2.40±0.38 | 11.35±0.92 | 18.69±1.05
+ TASD | 21.81±1.13 | 11.03±0.11 | 4.92±0.22 | 2.15±0.39 | 11.87±0.40 | 20.40±0.80
1011
+ Table 5: The performances of TASD w/ and w/o the table reconstruction on the Totto dataset.
+ Method | BLEU-1 | BLEU-2 | BLEU-3 | BLEU-4 | METEOR | ROUGE-L
+ TASD w/o D | 10.03±0.39 | 4.42±0.29 | 1.64±0.36 | 0.71±0.38 | 10.29±0.49 | 10.67±0.34
+ TASD w/o D w/ TRLoss | 9.94±0.43 | 4.35±0.31 | 1.63±0.31 | 0.75±0.13 | 10.37±0.22 | 10.62±0.60
+ TASD w/ TRLoss | 14.57±0.87 | 5.22±0.42 | 1.70±0.49 | 0.89±0.38 | 11.79±0.77 | 15.28±0.86
+ TASD w/ TRLoss in 1st pass | 14.00±0.82 | 5.31±0.27 | 1.72±0.25 | 0.75±0.13 | 11.02±0.77 | 14.74±0.51
+ TASD w/ TRLoss in 2nd pass | 13.89±0.58 | 4.78±0.61 | 1.47±0.14 | 0.52±0.20 | 11.07±0.66 | 14.73±0.79
+ TASD | 14.19±1.08 | 5.17±0.38 | 1.71±0.32 | 0.78±0.21 | 11.65±0.71 | 14.96±1.10
1060
+ According to the results reported on the nu-
1061
+ mericNLG dataset, the TRLoss is helpful in en-
1062
+ hancing the capability of table comprehension;
+ however, the best performance is achieved by TASD
+ w/o D w/ TRLoss. It seems that the performance im-
1065
+ provement gained by the table comprehension en-
1066
+ hancement is sacrificed after the text deliberation
1067
+ is adopted. Meanwhile, on the Totto dataset, TASD
1068
+ with the table reconstruction (i.e., TASD w/ TRLoss)
1069
+ does achieve the best performance in terms of
1070
+ BLEU-1, BLEU-2, METEOR, and ROUGE-L,
1071
+ though the improvement is not remarkable. The
+ fact that the contents of the input tables are mainly
+ linguistic and that the table structures are not very
+ diverse might explain the performance improvement
+ of TASD w/ TRLoss on the Totto dataset. With the
1076
+ above comparisons, we can conclude that, for the
1077
+ input tables with diverse structures, the limitation
1078
+ of the current text deliberation mechanism cannot
1079
+ be neglected if one aims to enhance the capability
1080
+ of table comprehension for the table-to-text task.
1081
+ Moreover, this also suggests that the generalization
1082
+ capability of text deliberation of TASD should be
1083
+ improved in the future.
1084
+ Limitations. In this work, long tables in the
1085
+ Totto dataset are removed since the efficiency and
1086
+ performance of TASD on large tables could be low-
1087
+ ered. In the future, the capability of handling long
1088
+ tables for table-to-text models should be further ex-
1089
+ plored. Besides, a large-scale and more exhaustive
1090
+ human evaluation is necessary. We plan to recruit
1091
+ more volunteers to conduct the human annotation.
1092
+ 7 Conclusion
1094
+ In this paper, to realize table-to-text with the pre-
1095
+ trained language model, we proposed a table struc-
1096
+ ture understanding and text deliberating approach,
1097
+ namely TASD. The table structure understanding
1098
+ was realized by developing a hierarchical multi-
1099
+ head attention network, which can benefit the fine-
1100
+ tuning of the text-to-text pretrained model. The
1101
+ fully represented table information benefits not
1102
+ only the pretrained language model but also the
1103
+ text deliberation process since the structure infor-
1104
+ mation with rich semantics could be fed into the
1105
+ second-pass decoding naturally. We carried out ex-
1106
+ tensive experiments on two public datasets with
1107
+ different table types. Automatic and human-based
1108
+ evaluations, as well as qualitative analysis, vali-
1109
+ date the effectiveness of our approach to generating
1110
+ faithful and fluent table descriptions. In the future,
1111
+ we will improve text deliberation by devising a
1112
+ unified framework to integrate the multi-pass de-
1113
+ coder and refine the descriptive text paying more
1114
+ attention to sentence fluency.
1115
+ Acknowledgements
1116
+ This work is supported in part by Foshan
1117
+ HKUST Projects (FSUST21-FYTRI01A, FSUST
1118
+ 21-FYTRI02A).
1119
+ References
1123
+ Yang Bai, Ziran Li, Ning Ding, Ying Shen, and Hai-
1124
+ Tao Zheng. 2021. Infobox-to-text generation with
1125
+ tree-like planning based attention network. In IJCAI,
1126
+ pages 3773–3779.
1127
+ Michael Denkowski and Alon Lavie. 2014. Meteor uni-
1128
+ versal: Language specific translation evaluation for
1129
+ any target language. In 9th workshop on statistical
1130
+ machine translation, pages 376–380.
1131
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and
1132
+ Kristina Toutanova. 2019. Bert: Pre-training of deep
1133
+ bidirectional transformers for language understand-
1134
+ ing. In NAACL-HLT, pages 4171–4186.
1135
+ Rui Dong and David A Smith. 2021. Structural encod-
1136
+ ing and pre-training matter: Adapting bert for table-
1137
+ based fact verification. In EACL, pages 2366–2375.
1138
+ Xinwei Geng, Xiaocheng Feng, Bing Qin, and Ting
1139
+ Liu. 2018. Adaptive multi-pass decoder for neural
1140
+ machine translation. In EMNLP, pages 523–532.
1141
+ Heng Gong, Xiaocheng Feng, Bing Qin, and Ting Liu.
1142
+ 2019a. Table-to-text generation with effective hier-
1143
+ archical encoder on three dimensions (row, column
1144
+ and time). In EMNLP-IJCNLP, pages 3143–3152.
1145
+ Heng Gong, Yawei Sun, Xiaocheng Feng, Bing
1146
+ Qin, Wei Bi, Xiaojiang Liu, and Ting Liu. 2020.
1147
+ Tablegpt: Few-shot table-to-text generation with ta-
1148
+ ble structure reconstruction and content matching.
1149
+ In COLING, pages 1978–1988.
1150
+ Li Gong, Josep M Crego, and Jean Senellart. 2019b.
1151
+ Enhanced transformer model for data-to-text genera-
1152
+ tion. In Proceedings of the 3rd Workshop on Neural
1153
+ Generation and Translation, pages 148–156.
1154
+ Mihir Kale and Abhinav Rastogi. 2020. Text-to-text
1155
+ pre-training for data-to-text tasks. In INLG, pages
1156
+ 97–102.
1157
+ Rémi Lebret, David Grangier, and Michael Auli. 2016.
1158
+ Neural text generation from structured data with
1159
+ application to the biography domain.
1160
+ In EMNLP,
1161
+ pages 1203–1213.
1162
+ Liang Li, Can Ma, Yinliang Yue, and Dayong Hu.
1163
+ 2021a. Improving encoder by auxiliary supervision
1164
+ tasks for table-to-text generation. In ACL-IJCNLP,
1165
+ pages 5979–5989.
1166
+ Xiao Li, Yawei Sun, and Gong Cheng. 2021b. Tsqa:
1167
+ Tabular scenario based question answering. In AAAI,
1168
+ volume 35, pages 13297–13305.
1169
+ Yangming Li and Kaisheng Yao. 2021.
1170
+ Rewriter-
1171
+ evaluator architecture for neural machine translation.
1172
+ In ACL-IJCNLP, pages 5701–5710.
1173
+ Chin-Yew Lin. 2004. Rouge: A package for automatic
1174
+ evaluation of summaries.
1175
+ In Text summarization
1176
+ branches out, pages 74–81.
1177
+ Tianyu Liu, Kexiang Wang, Lei Sha, Baobao Chang,
1178
+ and Zhifang Sui. 2018. Table-to-text generation by
1179
+ structure-aware seq2seq learning. In AAAI.
1180
+ Hongyuan Mei, Mohit Bansal, and Matthew R Walter.
1181
+ 2016. What to talk about and how? selective gener-
1182
+ ation using lstms with coarse-to-fine alignment. In
1183
+ NAACL-HLT, pages 720–730.
1184
+ Soichiro Murakami, Akihiko Watanabe, Akira Miyazawa, Keiichi Goshima, Toshihiko Yanase, Hi-
1190
+ roya Takamura, and Yusuke Miyao. 2017. Learning
1191
+ to generate market comments from stock prices. In
1192
+ ACL, pages 1374–1384.
1193
+ Jan Niehues, Eunah Cho, Thanh-Le Ha, and Alex
1194
+ Waibel. 2016.
1195
+ Pre-translation for neural machine
1196
+ translation. In COLING, pages 1828–1836.
1197
+ Jason Obeid and Enamul Hoque. 2020. Chart-to-text:
1198
+ Generating natural language descriptions for charts
1199
+ by adapting the transformer model. In INLG, pages
1200
+ 138–147.
1201
+ Kishore Papineni, Salim Roukos, Todd Ward, and Wei-
1202
+ Jing Zhu. 2002. Bleu: a method for automatic eval-
1203
+ uation of machine translation. In ACL, pages 311–
1204
+ 318.
1205
+ Ankur Parikh, Xuezhi Wang, Sebastian Gehrmann,
1206
+ Manaal Faruqui, Bhuwan Dhingra, Diyi Yang, and
1207
+ Dipanjan Das. 2020. Totto: A controlled table-to-
1208
+ text generation dataset.
1209
+ In EMNLP, pages 1173–
1210
+ 1186.
1211
+ Ratish Puduppully, Li Dong, and Mirella Lapata. 2019.
1212
+ Data-to-text generation with content selection and
1213
+ planning. In AAAI, volume 33, pages 6908–6915.
1214
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan,
1215
+ Dario Amodei, Ilya Sutskever, et al. 2019.
1216
+ Lan-
1217
+ guage models are unsupervised multitask learners.
1218
+ OpenAI blog, 1(8):9.
1219
+ Abigail See, Peter J Liu, and Christopher D Manning.
1220
+ 2017. Get to the point: Summarization with pointer-
1221
+ generator networks. In ACL, pages 1073–1083.
1222
+ Lya Hulliyyatus Suadaa, Hidetaka Kamigaito, Kotaro
1223
+ Funakoshi, Manabu Okumura, and Hiroya Taka-
1224
+ mura. 2021. Towards table-to-text generation with
1225
+ numerical reasoning. In ACL-IJCNLP, pages 1451–
1226
+ 1465.
1227
+ Ran Tian, Shashi Narayan, Thibault Sellam, and
1228
+ Ankur P Parikh. 2019. Sticking to the facts: Con-
1229
+ fident decoding for faithful data-to-text generation.
1230
+ arXiv.
1231
+ Bayu Trisedya, Jianzhong Qi, and Rui Zhang. 2020.
1232
+ Sentence generation for entity description with
1233
+ content-plan attention. In AAAI, volume 34, pages
1234
+ 9057–9064.
1235
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob
1239
+ Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz
1240
+ Kaiser, and Illia Polosukhin. 2017. Attention is all
1241
+ you need. In NeurIPS, pages 5998–6008.
1242
+ Peng Wang, Junyang Lin, An Yang, Chang Zhou,
1243
+ Yichang Zhang, Jingren Zhou, and Hongxia Yang.
1244
+ 2021. Sketch and refine: Towards faithful and in-
1245
+ formative table-to-text generation. In ACL-IJCNLP,
1246
+ pages 4831–4843.
1247
+ Zhenyi Wang, Xiaoyang Wang, Bang An, Dong Yu,
1248
+ and Changyou Chen. 2020. Towards faithful neural
1249
+ table-to-text generation with content-matching con-
1250
+ straints. In ACL, pages 1072–1086.
1251
+ Sam Wiseman, Stuart M Shieber, and Alexander M
1252
+ Rush. 2017.
1253
+ Challenges in data-to-document gen-
1254
+ eration. In EMNLP, pages 2253–2263.
1255
+ Yingce Xia, Fei Tian, Lijun Wu, Jianxin Lin, Tao Qin,
1256
+ Nenghai Yu, and Tie-Yan Liu. 2017. Deliberation
1257
+ networks: Sequence generation beyond one-pass de-
1258
+ coding. NeurIPS, 30:1784–1794.
1259
+ Xinyu Xing and Xiaojun Wan. 2021. Structure-aware
1260
+ pre-training for table-to-text generation.
1261
+ In ACL-
1262
+ IJCNLP, pages 2273–2278.
1263
+ Pengcheng Yin, Graham Neubig, Wen-tau Yih, and Se-
1264
+ bastian Riedel. 2020. Tabert: Pretraining for joint
1265
+ understanding of textual and tabular data. In ACL,
1266
+ pages 8413–8426.
1267
+ Hongzhi Zhang, Yingyao Wang, Sirui Wang, Xuezhi
1268
+ Cao, Fuzheng Zhang, and Zhongyuan Wang. 2020.
1269
+ Table fact verification with structure-aware trans-
1270
+ former. In EMNLP, pages 1624–1629.
1271
+ A Human Evaluation Settings
1273
+ The criteria adopted in our human-based evaluation
1274
+ are (1) Grammar (e.g., is this paragraph grammat-
1275
+ ical?), (2) Coherence & Concise (e.g., is this para-
1276
+ graph coherent and contextually consistent? does
1277
+ it repeat redundant information?), and (3) Factual
1278
+ perspective (e.g., are the facts that this paragraph
1279
+ describes correct? are these facts related to refer-
1280
+ ences and tables?). More specifically, we list the
1281
+ detailed justifications on how to score the generated
1282
+ text in each criterion as follows.
1283
+ Grammar
1284
+ • 1 It is more like garbled code than a paragraph.
1285
+ • 2 There are many obvious grammatical mis-
1286
+ takes.
1287
+ • 3 There are a few obvious grammatical mistakes.
1288
+ • 4 There are few grammatical mistakes.
1289
+ • 5 There are no grammatical mistakes.
1290
+ Coherence & Concise
1291
+ • 1 The logic of text expression is chaotic and
1292
+ nonsense.
1293
+ • 2 There are a lot of logical inconsistencies or
1294
+ redundant information.
1295
+ • 3 There are some logical inconsistencies or re-
1296
+ dundant information.
1297
+ • 4 There are a few logical inconsistencies or
1298
+ redundant information, but it does not affect
1299
+ browsing.
1300
+ • 5 The logic of the text is smooth without redun-
1301
+ dant information.
1302
+ Factual Perspective
1303
+ • 1 The paragraph does not coincide with the ref-
1304
+ erence or table, and it is full of information
1305
+ inconsistent with the facts.
1306
+ • 2 The paragraph describes the facts incorrectly
1307
+ and has a low correlation with reference, but is
1308
+ related to the information in the table.
1309
+ • 3 The paragraph description is incorrect, but it
1310
+ is highly coincident with the reference.
1311
+ • 4 The paragraph description is basically correct,
1312
+ and the coincidence with the reference is low,
1313
+ but it also describes the information in the table.
1314
+ • 5 The paragraph description is correct and
1315
+ highly coincident with the reference.
1316
+ B Illustrative Examples of Generated Descriptions
1319
+ We additionally selected another two examples of
1320
+ the generated table descriptions from the numeric-
1321
+ NLG and Totto datasets, respectively. The results
1322
+ are shown in Figs. 6 and 7. From these four ex-
1323
+ amples, we can see that TASD can generate more
1324
+ accurate and fluent descriptive texts. Meanwhile, in-
+ correct descriptions can be found in the outcome texts
+ generated by different models for cases D and F,
+ which suggests that generating faithful descriptions
1328
+ for open-domain tables is much more challenging
1329
+ and requires more powerful and, thus larger, pre-
1330
+ trained language models.
1331
+ C Extra Implementation Details
1333
+ The learning rate of GPT2 was searched from
+ {3e-4, 3e-5, 3e-6}. In the evaluation of discussing
+ the limitation of text deliberation (see Section 6),
+ a trade-off parameter for balancing the GPT2 fine-
+ tuning loss and the TRLoss was adopted, then the
+ trade-off parameter was searched from {1e-1,
+ 5e-2, 1e-2, 5e-3, 1e-3}, and 1e-2 was selected for
1340
+ the reported performance. Besides, the reported
1341
+ results in Tables 4 and 5 were averaged in 3 runs.
1342
+ Figure 6: Generated table descriptions on cases C and D.
1346
+ Figure 7: Generated table descriptions on cases E and F.
1347
hdA0T4oBgHgl3EQfIP8h/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
htA0T4oBgHgl3EQfIP9i/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:518685194e48dd1913708b18710a45deab193a57471ee0b6a7370b770e1a70c5
3
+ size 1441837
idAzT4oBgHgl3EQfbPxy/content/tmp_files/2301.01382v1.pdf.txt ADDED
@@ -0,0 +1,840 @@
1
+ Task-sequencing Simulator: Integrated Machine Learning to Execution
2
+ Simulation for Robot Manipulation
3
+ Kazuhiro Sasabuchi1, Daichi Saito1, Atsushi Kanehira1, Naoki Wake1, Jun Takamatsu1, Katsushi Ikeuchi1
4
+ Abstract— A task-sequencing simulator in robotics manip-
5
+ ulation to integrate simulation-for-learning and simulation-
6
+ for-execution is introduced. Unlike existing machine-learning
7
+ simulation where a non-decomposed simulation is used to
8
+ simulate a training scenario, the task-sequencing simulator runs
9
+ a composed simulation using building blocks. This way, the
10
+ simulation-for-learning is structured similarly to a multi-step
11
+ simulation-for-execution. To compose both learning and execu-
12
+ tion scenarios, a unified trainable-and-composable description
13
+ of blocks called a concept model is proposed and used. Using the
14
+ simulator design and concept models, a reusable simulator for
+ learning different tasks and a common-ground system for learning-
+ to-execution and simulation-to-real are achieved and shown.
17
+ I. INTRODUCTION
18
+ Simulators are important in robotics. Compared to the
19
+ real world, simulators can run endlessly, safely, remove
20
+ stochasticity, and provide ground truth data. One direction
21
+ for using simulators in robotics is to help check collisions
22
+ and safe executions before real robot executions. Another
23
+ direction is using simulators as a tool for machine learning.
24
+ Simulators that suit either one of the above directions are
25
+ available today, however, it is desirable to have a simulator
26
+ which fulfills both purposes to better integrate learning and
27
+ execution, especially in the multi-step manipulation domain.
28
+ In multi-step manipulation, a series of tasks which occur
29
+ sequentially must be simulated. An example of a sequential
30
+ series of tasks is bringing an object, which is composed
31
+ of tasks: grasp, pick, bring. An execution simulator must
32
+ be able to trigger these different tasks and connect them
33
+ into a sequenced simulation. When the simulator is used
34
+ for checking robot executions, the simulation is a matter of
35
+ combining programmed or trained building blocks (e.g., run
36
+ “grasp” then “pick” then “bring”).
37
+ In contrast, machine learning simulators often ignore the
38
+ task-sequence composition and are structured to train a
39
+ specific problem or benchmark (e.g., a non-decomposed
40
+ “pick-and-place” simulation)[1][2]. This structural differ-
41
+ ence causes a gap between simulation-for-execution and
42
+ simulation-for-learning. The learned results become specific
43
+ to the trained scenario and contradict the simulation-
44
+ for-execution where scenarios are non-fixed.
45
+ Instead, a machine learning simulator could be designed
46
+ similar to an execution simulator. The “pick-and-place”
47
+ scenario can be decomposed into a sequenced simulation
48
+ of “grasp then pick then bring then place then release.”
49
+ The difference compared to the execution simulator is that
50
+ 1All authors are with Microsoft, Redmond, WA, USA
+ [Fig. 1 diagram labels: Task-sequencing simulator with a Concept Interface (CM Grasp, CM Open, CM Release; selected task at step t; configured training sequence; demonstrated execution sequence; training algorithms) and an Environment Engine Pipeline (Kinematics, Physics, Rendering, Post-process), linked by a state/action loop.]
+ Fig. 1. The proposed task-sequencing simulator which enables scenario composition for both learning and execution in robotics manipulation.
82
+ some of these blocks are “under-training” and are updated
83
+ as data is collected. Once the update has finished, the trained
84
+ block can be combined and reused for a different scenario,
85
+ thus, the simulation-for-learning can directly transition to the
86
+ simulation-for-execution. In addition, such design enables
87
+ learning new manipulation skills on top of programmed or
88
+ prior-trained building blocks. For example, a “grasp” could
89
+ be trained using the sequence “grasp then pick,” where
90
+ “pick” is a programmed task to provide a supervised signal
91
+ (i.e., teach that the “grasp” was successful if the “pick” was
92
+ successful).
93
+ This article introduces a task-sequencing simulator struc-
94
+ ture which enables integrated learning-to-execution simula-
95
+ tion. At its core, the simulator uses a unified block design
96
+ called the “concept model,” which is proposed within this
97
+ article and defines the necessary descriptions for training
98
+ a task, collecting trained tasks, and running the tasks to
99
+ compose a sequence.
100
+ The rest of the article is outlined as below: Section II
101
+ provides a background on existing robotic simulators. Sec-
102
+ tion III explains the overall simulator structure for achieving
103
+ machine learning to robot execution simulation. Section IV
104
+ explains the concept model core component of the simulator
105
+ and Section V provides some detailed example implementa-
106
+ tions of the model. Section VI shows the capabilities of the
107
+ simulator in machine-learning-to-robot-execution followed
110
+ by conclusions in Section VII.
111
+ II. BACKGROUND
112
+ While there are many existing simulators for robotics,
113
+ existing simulators may not achieve the integrated learning-
114
+ to-execution multi-step manipulation purpose for one of
115
+ the following reasons: 1) the simulator targets a different
116
+ domain other than manipulation, 2) the simulator can be
117
+ used for manipulation but misses a capability in simulation-
118
+ for-learning, 3) the simulator can be used for manipulation
119
+ but misses a capability in simulation-for-execution, 4) the
120
+ simulator can be used for manipulation and both learning-
121
+ and-execution purposes but not specifically for learning-to-
122
+ execution purposes.
123
+ Popular robotics simulators[3][4] include Gazebo[5],
128
+ MuJoCo[6], CoppeliaSim[7], CARLA[8], AirSim[9], and
129
+ Webots[10]. Gazebo has its advantage in its capability to
130
+ simulate executions using ROS integrated sensors and ac-
131
+ tuators but is not the best choice when it comes to data
132
+ collection and machine learning due to its slow simulation
133
+ performance and inconsistency in physics simulation. Thus,
134
+ Gazebo falls into the second category. Engines like MuJoCo
135
+ on the other hand, are suitable for stable physics simulation
136
+ in machine learning but miss some robotics simulation
137
+ capabilities such as inverse-kinematics and visual feedback
138
+ (realistic rendering). The focus is on physics simulation
139
+ rather than an integrated simulator for robot executions,
140
+ therefore falls into the third category. CARLA and Airsim
141
+ mainly target automobiles such as drones and cars therefore
142
+ miss some important features such as kinematics required for
143
+ manipulation and fall into the first category.
144
+ The CoppeliaSim is an integrated simulator with a kine-
145
+ matics and physics engine, and the PyRep toolkit[11] can
146
+ be used with the simulator for machine learning. WeBots
147
+ is also an integrated simulator and frameworks such as
148
+ Deepbots[12] help the simulator to be used for machine
149
+ learning. The machine learning features of these simulators
150
+ are external features that have been developed within the
151
+ community. While it is possible to use these simulators
152
+ for both execution and learning purposes, they have not
153
+ been designed for integrated learning-to-execution but rather
154
+ using for one-or-the-other purpose. That is, these simulators
155
+ are not designed to connect learning-and-execution, rather,
156
+ learning and execution are separate use cases where one uses
157
+ a community provided wrapper for machine learning, and the
158
+ other uses the integrated features to simulate a robotic system
159
+ execution.
160
+ Compared to the existing simulators, the task-sequencing
161
+ simulator was designed to connect simulation-for-learning
162
+ and simulation-for-execution. The simulator uses a con-
163
+ cept model which enables composition of pre-trained, pro-
164
+ grammed, or trained tasks, which is a powerful feature for
165
+ going from machine learning to real robot execution (e.g.,
166
+ such as plugging-in to machine learning platforms but then
167
+ connecting to execute on ROS). More importantly, tied-
168
+ integration not only allows features such as training using pre-sequent
169
+ and post-sequent task executions, but also collecting reusable
170
+ execution modules through training.
171
+ III. TASK-SEQUENCING SIMULATOR OVERVIEW
172
+ The task-sequencing simulator has two layers: the Concept
173
+ Interface for “action decision” and the Environment Engine
174
+ Pipeline for “state observing” (Figure 1). However, unlike
175
+ a typical learning simulator, where a specific problem has
176
+ a non-decomposable structure and the action decision is
177
+ a single policy being updated as data is collected for the
178
+ problem, the task-sequencing simulator adds an abstraction
179
+ to this action decision so that the problem is composed of
180
+ a sequence of tasks (i.e., switches between a collection of
181
+ tasks, where each task runs an individual policy). This way,
182
+ a learned task policy can become part of a collection of
183
+ policies for execution once the training has finished. Further
184
+ details of each layer are described below.
185
+ A. concept interface
186
+ At each simulation time-step, a robot decides the next
187
+ action depending on the current state of the world. This
188
+ decision is referred to as a policy. When the relation be-
189
+ tween the state, action, and next state (system dynamics) is
190
+ completely known, this policy can be directly programmed.
191
+ When the system dynamics are unknown, either the learning
192
+ of the policy or system dynamics is required through data
193
+ collection. Data collection is efficient if collected only for
194
+ the unknown dynamics and if known dynamics are directly
195
+ computed. Therefore, it is preferable to break down a robot’s
196
+ execution to a series of tasks, where each task executes its
197
+ own policy optimal for the system dynamics the task is cover-
198
+ ing. In addition, breaking down a robot’s execution increases
199
+ the reusability of each task policy and allows composing
200
+ different execution scenarios from the task building blocks.
201
+ The Concept Interface layer chooses and switches between
202
+ the tasks for a training or execution scenario assuming
203
+ (1) the series of tasks to simulate is known (ways for
204
+ knowing are shown in the experiments), and (2) each task
205
+ policy can indicate when the task has been completed (i.e.,
206
+ has a learned or programmed completion signal). During
207
+ simulation-for-execution, the Concept Interface layer chooses
208
+ the according task in a sequence and switches to the next
209
+ task once the current policy returns a task completion signal.
210
+ The simulation-for-learning can be conducted in a similar
211
+ way, except that the policy's output of the completion signal is
212
+ evaluated (e.g., by the success of a subsequent task). This
213
+ interchangeable structure enables an integrated learning-to-
214
+ execution simulation. To learn the termination signal under
215
+ an arbitrary training sequence, the tasks share a unified
216
+ design called a “concept model,” which is further explained
217
+ in the later sections.
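+ As an illustration, the switching logic can be sketched as follows; the interfaces (task.policy, env.step, env.observe) are hypothetical simplifications, assuming each task policy returns both an action and its completion signal.
+ def run_sequence(tasks, env, max_steps=1000):
+     # Concept Interface sketch: run each task's policy until it emits a
+     # completion signal, then switch to the next task in the sequence.
+     state = env.observe()
+     for task in tasks:                      # known series of tasks
+         for _ in range(max_steps):
+             action, done = task.policy(state)
+             state = env.step(action)
+             if done:                        # learned or programmed signal
+                 break
+     return state
+ During simulation-for-learning the same loop applies, except that the emitted completion signal is evaluated (e.g., by the success of the subsequent task) rather than trusted.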
218
+ B. environment engine pipeline
219
+ At a time-step control scale, a robot behaves under a
220
+ sense-plan-act with a physical embodiment. Thus, simulation
221
+ in robotics requires three important engines: kinematics,
222
+ physics (contact dynamics), and rendering. The role of
223
+
224
+ the kinematics engine is to do the simulation between the
225
+ robot’s action plan and the movement of the actual body.
226
+ The role of the physics engine is to do the simulation
227
+ between the robot’s body and the environment. The role of
228
+ the rendering engine is to do the simulation between the
229
+ environment and the robot’s sensing. In more technical terms,
230
+ the kinematics engine solves the mapping between cartesian
231
+ space and configuration space, the physics engine solves the
232
+ differential algebraic equation[13] using techniques such as
233
+ velocity-impulse linear complementarity-based time-stepping
234
+ methods[14][15], the rendering engine solves the rendering
235
+ equation[16] about lighting paths for pixel color generation.
236
+ While rendering engines simulate the robot’s sensing by
237
+ generating images, there is a gap between sensing and
238
+ perceiving (extracting meaningful states from the gener-
239
+ ated images). Rather than directly learning the sensing-to-
240
+ planning, sometimes it is more efficient to perceive-before-
241
+ planning and extract visual features. Moreover, in robotics
242
+ it is important to combine both visual and force feedback;
243
+ the visual features help compress the feedback so that the
244
+ vision and force have an aligned state dimension. Thus, the
245
+ proposed simulator adds a fourth “post-process engine” in
246
+ conjunction with the rendering engine.
247
+ These different-role engines are triggered in an ordered
248
+ pipeline to calculate the current state of the simulation world.
249
+ Often, simulators package specific engines to produce a
250
+ single simulation world, but in general, these engines could
251
+ run separately and combine/orchestrate multiple simulation
252
+ worlds to produce better simulation. For example, ROS
253
+ MoveIt could be used for accurate inverse kinematics sim-
254
+ ulation, PyBullet for reproducible physics simulation, and
255
+ the Unreal Engine for photo-realistic ray-traced rendering.
256
+ Combining different engines is possible as long as each
257
+ engine is able to load the same models of the robot/objects
258
+ and is able to share the robot and object states among each
259
+ engine (which can be done using TCP connections etc.).
260
+ In addition, instead of using simulation engines, it is also
261
+ possible to connect “real” engines which replace simulated
262
+ physics with the real robot’s torque sensors and simulated
263
+ rendering with real images from the robot’s camera.
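+ A rough sketch of such a pluggable pipeline is given below; the stage interface is an assumption for illustration, not an actual API of the simulator.
+ class EnginePipeline:
+     # Ordered engine pipeline producing the world state at each time-step.
+     # Stages are pluggable, e.g., MoveIt kinematics, PyBullet physics,
+     # Unreal Engine rendering, or bridges to the real robot's sensors.
+     def __init__(self, kinematics, physics, rendering, post_process):
+         self.stages = [kinematics, physics, rendering, post_process]
+
+     def step(self, action):
+         shared = {"action": action}
+         for stage in self.stages:   # each stage reads and updates shared state
+             shared.update(stage.update(shared))
+         return shared               # combined observation for the policy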
264
+ IV. CONCEPT MODELS
265
+ A single task block operates under some system dynamics
266
+ and achieves a goal state from an initial state. Thus, the de-
267
+ tails of a task can be described using the actors of the system,
268
+ an initial state, a goal (end) state, and the parameters of the
269
+ system dynamics (Figure 2). This kind of task description
270
+ can be referred to as a “task model”[17]. This description
271
+ is enough for executing a task if the system dynamics are
272
+ completely known. The initial state is usually the end state
273
+ of the previous task. However, when the dynamics are not
274
+ fully known, learning is required, and during learning, the
275
+ initial state must be randomized.
276
+ Instead of fully describing the task, a task can be described
277
+ using actor configurations, an initial state, a necessary goal
278
+ state that is described from observable system states, a
279
+ sufficient goal state that is described from non-observable
280
+ [Fig. 2 diagram: a Task Model maps an initial state through an Action from a programmed policy to a defined goal state; Actors are obtained before execution; Parameters are completely known and obtained before execution.]
+ Fig. 2. An illustration of a task model.
292
+ [Fig. 3 diagram: a Concept Model maps an initial state through an Action from a policy to a necessary goal state and a sufficient goal state; Actors are a set of configurated settings; Parameters are partially known and randomized at training, estimated at execution.]
+ Fig. 3. An illustration of a concept model.
305
+ system states, and the partially known parameters of the
306
+ system dynamics (Figure 3). This kind of task description
307
+ will be referred to as a “concept model” which compared to
308
+ the task model may not be concrete enough for execution, but
309
+ describes the concepts of the task to learn the execution. In
310
+ the special case where the system dynamics are fully known
311
+ and the task is programmable, the descriptions of the concept
312
+ model is identical to the task model.
313
+ By providing a structured description format, the Con-
314
+ cept Interface layer can access these blocks interchangeably
315
+ within a training sequence as the structures are the same and
316
+ only differ in the details.
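+ In code form, the unified description could be captured by a small record type; the field names below are hypothetical and simply mirror the descriptions above.
+ from dataclasses import dataclass
+ from typing import Callable, Optional
+
+ @dataclass
+ class ConceptModel:
+     actors: list                              # set of configurable actor settings
+     parameters: dict                          # partially known dynamics parameters
+     necessary_goal: Callable[[dict], float]   # cost on observable system states
+     sufficient_goal: Callable[[dict], float]  # cost on non-observable system states
+     policy: Optional[Callable] = None         # programmed, trained, or under-training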
317
+ A. concept model usage in learning
318
+ The concept model descriptions are used to learn the
319
+ task completion signal as well as the actions to handle the
320
+ unknown system dynamics. The necessary goal state and
321
+ sufficient goal state descriptions are used to evaluate whether
322
+ the task completion signal and actions are appropriate. The
323
+ evaluation is done by minimizing the cost of the current
324
+ states to the goal states. Note that the cost to the necessary
325
+ goal state is evaluated after every action decision, whereas
326
+ the cost to the sufficient goal state is only evaluated once a
327
+ task completion signal is chosen during training.
328
+ The actor configurations describe the possible environ-
329
+ ments (the world including the robot and any manipulating
330
+ target) for training the task. If there are no pre-sequent tasks
331
+ involved for training, then one of the actor configurations is
332
+ used to define the initial state of the tasks. Otherwise, the
333
+ end state of the previous task is the initial state. Unlike the
334
+ initial state, the actor configuration is independent from the
335
+ states of the previous task, thus is configurable and can be
336
+ used for randomizing the states for training.
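+ Assuming the ConceptModel record sketched earlier, the two goal descriptions could enter an episode score roughly as follows (hypothetical names, for illustration only):
+ def episode_return(cm, states, done_signal):
+     # necessary goal state: a cost evaluated after every action decision
+     r = -sum(cm.necessary_goal(s) for s in states)
+     if done_signal:
+         # sufficient goal state: evaluated only once the completion
+         # signal is chosen, e.g., via the success of a subsequent task
+         r -= cm.sufficient_goal(states[-1])
+     return r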
337
+
338
+ B. concept model usage in execution
339
+ When the dynamics are fully known, the concept model
340
+ acts the same as the task model. The state of the task changes
341
+ from the initial state using actions from a programmed policy
342
+ based on the system dynamics and actors. The initial state
343
+ of the task during execution is the end state of the previous
344
+ task and the task ends once the goal state is achieved.
345
+ When the dynamics are not fully known, the observable
346
+ system states of the task change using a learned policy.
347
+ Similar to the programmed case, the initial state is the
348
+ observable states at the end of the previous task. However,
349
+ since part of the goal state is non-observable, the end of the
350
+ task cannot be identified just with the model descriptions.
351
+ Instead, the learned task completion signal from the concept
352
+ model descriptions is used to identify the end of the task.
353
+ V. MODEL IMPLEMENTATIONS
354
+ By following the concept model structure, a task is im-
355
+ plemented in a way that can be trained but then collected
356
+ as a building block for execution. In this article, the screw
357
+ theory[18] based separation of dynamics[17] is used to
358
+ separate a task from another task. That is, once the relation
359
+ between the manipulation target and the robot’s end-effector
360
+ is initialized, a task breaks or maintains a contact state
361
+ between the target and the environment. By classifying the
362
+ inequality equation patterns of contact points, this leads to
363
+ seven pure translation tasks and seven pure rotation tasks.
364
+ Figure 4 shows implementation examples of some of these
365
+ tasks as concept models. Below describes the details of some
366
+ of the examples in the figure. Note that as the task classifica-
367
+ tion only depends on the relation between the end-effector,
368
+ manipulation target, and the environment, the movement of
369
+ the arm (configuration space) can be ignored[19] and the task
370
+ only focuses on the movement of the end-effector (cartesian
371
+ space).
372
+ A. grasping
373
+ The grasp task initiates the relation between the
381
+ manipulation-target and the robot’s end-effector. The actors
382
+ are a target object, an environment (e.g., table), and the
383
+ end-effector. The initial state is where the target object is
384
+ attached to the environment but not attached to the end-
385
+ effector (including shape of the finger joints). The goal state
386
+ is where the target object is also attached to the end-effector
387
+ in a way such that enough force is exerted for performing a
388
+ subsequent task. The parameters are the distance between the
389
+ target and end-effector as well as the approaching direction
390
+ of the end-effector.
391
+ When details of the target object are completely known, a
392
+ task model can be defined and programmed from the above
393
+ details. However, in the real world, there is uncertainty in the
+ shape of the object and the distance to the target; distinguishing
+ whether enough force is exerted is intractable due to the
+ inaccuracy of real contact sensors or the lack of sensors to
+ detect slipping; and a grasp failure due to finger-object collision
+ during approach may occur if the policy is not carefully
+ designed under the uncertainties of the object properties.
400
+ [Fig. 4 contents, one entry per concept model (NGS = necessary goal state, SGS = sufficient goal state):]
+ Grasp: initial state freed OBJ-EEF; Action: policy; Actors: end-effector (EEF), environment (ENV), a set of objects (OBJ) with randomized shapes; Parameters: estimated approach distance OBJ-EEF, approach direction of EEF; NGS: attached OBJ-EEF; SGS: next task success.
+ Open: initial OBJ pose; Action: policy; Actors: EEF, ENV, a set of objects with randomized parameters; Parameters: estimated radius, axis center, axis direction to maintain constraint OBJ-ENV; NGS: maintain OBJ-ENV; SGS: desired OBJ pose.
+ Pick: initial state attached OBJ-ENV; Action: programmed; Actors: EEF, ENV, target object (OBJ); Parameters: detach direction to break constraint OBJ-ENV; goal: freed OBJ, ENV.
+ Bring: initial OBJ pose; Action: programmed; Actors: EEF, target object (OBJ); Parameters: move direction and distance of OBJ; goal: desired OBJ pose.
+ Place: initial state freed OBJ, ENV; Action: programmed; Actors: EEF, ENV, target object (OBJ); Parameters: attach direction to create constraint OBJ-ENV; goal: attached OBJ-ENV.
+ Release: initial state attached OBJ-EEF; Action: programmed; Actors: EEF, target object (OBJ); Parameters: release distance OBJ-EEF, release direction of EEF; goal: freed OBJ, EEF.
+ Pour: initial ENV state; Action: policy; Actors: EEF, set of environment (ENV) and objects (OBJs) with randomized parameters; Parameters: estimated ENV (cup) filled state, estimated OBJ (pitcher) size, estimated axis center and direction of OBJ-ENV; NGS: maintain OBJ-ENV; SGS: desired ENV state.
+ Wipe: initial ENV state; Action: policy; Actors: EEF, set of environment (ENV) and objects (OBJs) with randomized parameters; Parameters: estimated ENV (plane) clean state, estimated normal axis of OBJ-ENV; NGS: maintain OBJ-ENV; SGS: desired ENV state.
488
+ Fig. 4. Example concept models of eight different tasks in the screw-theory
489
+ based classification.
490
+ Instead, a set of actor configurations is defined as object
491
+ shapes from a randomized range, a necessary goal state is
492
+ defined as the end-effector to be in contact with the target
493
+ object on an appropriate surface (which can be obtained on
494
+ the real robot with the finger configurations and finger-torque
495
+ sensors with a threshold to determine a binary contacted-or-
496
+ not-contacted state), a sufficient goal state is defined as a
497
+ successful performance of a subsequent task, the estimated
498
+ distance is used for the parameters. The defined goal states
499
+ are used to formulate the reward (cost-to-go) for learning the
500
+ policy.
501
+ Using this concept model, the approaching strategy (ad-
502
+ justed movement around the approaching direction of the
503
+ end-effector) and the sufficient amount of "closing" of the
504
+ fingers to perform a subsequent task is learned. The learned
505
+ policy chooses the sufficient amount based on the object
506
+ shape which can partially be inferred by the shape of the end-
507
+ effector finger joints once touching the object. The policy
508
+ returns a termination signal once it reaches the sufficient amount
509
+ of closing.
510
+ B. door-opening
511
+ The door-opening task is a one degree-of-freedom pure
512
+ rotation task. The actors are a target object (the door), an
513
+ environment (the hinge), and the end-effector (attached to the
514
+
515
+ door handle). The initial state is where the end-effector and
516
+ target object are at an attached state. The goal state is where
517
+ the target object has moved to some desired orientation. The
518
+ parameters are the rotation radius, the rotation axis center,
519
+ and the rotation axis direction defined by the target and
520
+ environment.
521
+ When details of the target and environment are completely
522
+ known, a task model can be defined and programmed from
523
+ the above details. However, in the real world, there is
524
+ uncertainty in the environment parameters. Instead, a set of
525
+ actor configurations randomizing the radius, a necessary goal
526
+ state that moves the object along the environment constraint
527
+ at each time-step (which can be obtained on the real robot
528
+ by using a force sensor on the wrist and checking against
529
+ a maximum-stress threshold), a sufficient goal state that
530
+ ensures the door has reached the desired orientation, and
531
+ estimation of the parameters are used to describe the concept
532
+ model.
533
+ By using an end-effector with only force-sensor feedback
534
+ on the wrist, this model enables learning a policy which
535
+ updates parameter estimations at each time-step, and then
536
+ generates a hand motion based on the updated parameters.
537
+ The policy returns a termination signal once inferred that the
538
+ desired orientation has been reached.
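+ For intuition, the hand-motion generation of such a policy reduces to stepping along an arc defined by the current estimates; a minimal sketch using Rodrigues' rotation formula is shown below (the estimation update itself is learned and omitted, and all names are hypothetical).
+ import numpy as np
+
+ def next_eef_position(p, center, axis, angle):
+     # Rotate the end-effector position p about the estimated hinge line
+     # (center, axis) by a small angle (Rodrigues' rotation formula).
+     axis = axis / np.linalg.norm(axis)
+     v = p - center
+     v_rot = (v * np.cos(angle)
+              + np.cross(axis, v) * np.sin(angle)
+              + axis * np.dot(axis, v) * (1.0 - np.cos(angle)))
+     return center + v_rot
+ At each time-step, the wrist force reading is checked against the maximum-stress threshold and the radius, center, and axis estimates are refined before the next arc step is generated.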
539
+ C. bringing
540
+ The bringing task is a six degrees-of-freedom translation
541
+ and rotation task. The actors are the target object and the
542
+ end-effector. The initial state is where the end-effector and
543
+ target object are at an attached state. The goal state is where
544
+ the target object has moved to some relative positioning. The
545
+ parameters are the moving direction and distance.
546
+ Since there are no uncertainties in the target or environ-
547
+ ment, the parameters can be manually specified and the goal
548
+ can be directly specified from the parameters. Thus, the
549
+ concept model is identical to the task model and can be
550
+ programmed.
551
+ VI. EXPERIMENTS
552
+ Experiments were conducted using the concept model
553
+ implementations shown in the previous section, and the
554
+ developed task-sequencing simulator.
555
+ For the learning experiments, the series of tasks to simulate
556
+ were predefined. These experiments were performed to show
557
+ the effectiveness of the simulator and its reusability for
558
+ training different tasks.
559
+ By running the pre-sequent tasks of the task-to-train at
560
+ the start of an episode, and by running the subsequent tasks
561
+ at reward return, the simulator is compatible with common
562
+ reinforcement learning platforms, and utilizes off-the-shelf
563
+ learning algorithms. For the experiments, the simulator was
564
+ connected to the Bonsai platform and used the PPO algo-
565
+ rithm.
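+ Conceptually, this hookup can be pictured as a standard episodic environment; the wrapper below is a hedged sketch with hypothetical interfaces, not the actual platform integration.
+ class TaskToTrainEnv:
+     # Expose one task-to-train as an RL environment while the surrounding
+     # tasks run as fixed (programmed or previously trained) blocks.
+     def __init__(self, sim, pre_tasks, task, post_tasks):
+         self.sim, self.pre = sim, pre_tasks
+         self.task, self.post = task, post_tasks
+
+     def reset(self):
+         self.sim.randomize(self.task.actors)    # sample an actor configuration
+         for t in self.pre:                      # e.g., run "grasp" before "open"
+             self.sim.run(t)
+         return self.sim.observe()
+
+     def step(self, action, done_signal):
+         # done_signal is the completion output of the policy under training
+         state = self.sim.step(action)
+         reward = -self.task.necessary_goal(state)
+         if done_signal:                         # sufficient goal via post tasks
+             reward += float(all(self.sim.run(t) for t in self.post))
+         return state, reward, done_signal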
566
+ For the execution experiments, the series of tasks were
567
+ obtained from human demonstrations and the actions were
568
+ generated using the learned task policies. These experiments
569
+ were performed to show the effectiveness of the simulator for
570
+ execution simulations (execution of different composed sce-
571
+ narios). Note that the exact same simulator and task blocks
572
+ used for training were used for this experiment, showing
573
+ the simulator’s capability to transition from simulation-for-
574
+ learning to simulation-for-execution.
575
+ For simulation, states were obtained by plugging the
+ PyBullet engine into the environment engine pipeline for the
+ physics engine role, and the Unreal Engine for rendering. For
578
+ the real robot, states were obtained plugging ROS (connected
579
+ via roslibpy) to the environment engine pipeline. For the arm
580
+ kinematics, the ROS MoveIt package was used.
581
+ A. training
582
+ Figure 5-(A) shows the task-sequencing simulator config-
583
+ urations for running the grasp training. A configured training
584
+ sequence “grasp then pick” is passed to the simulator. The
585
+ “grasp” task is the task-to-train, and a programmed “pick”
586
+ is used as the subsequent task for evaluating the sufficient
587
+ goal state.
588
+ Figure 5-(B) shows the trained grasp results performed
589
+ on a real robot. The concept model was designed with
590
+ the object shape parameters as unknown. Regardless of
591
+ such uncertainty, the learned policy successfully grasps the
592
+ different shaped objects including but not limited to a box,
593
+ a cylindrical cup, an oval rice pack, and a diamond-shaped
594
+ candy box.
595
+ Figure 5-(C) shows a learned grasp for a different robot
596
+ hand, which was trained using the same simulator and
597
+ concept models but with a different actor configuration
598
+ (end-effector) setting. The results show the reusability of
599
+ the simulator for training different robots with different
600
+ mechanics (a hand with multiple fingers and a gripper with
601
+ limited degrees of freedom).
602
+ Figure 5-(D) shows that by changing the configured train-
603
+ ing sequence to “grasp then open,” the door-opening task is
604
+ trained using the same simulator. The “open” task is the task-
605
+ to-train, and the trained “grasp” is reused as the pre-sequent
606
+ task for initiating the relation between the end-effector and
607
+ the target door. Regardless of uncertainty in the rotation
608
+ radius, center, and axis direction, the real robot performed the
609
+ door-opening using the learned policy. Although the policy
610
+ was trained only using simulated data, the policy is directly
611
+ applicable to the real robot as the sufficient goal state does
612
+ not require observability on the real robot and because the
613
+ policy action decisions only rely on the states with very small
614
+ sim-to-real gaps.
615
+ B. execution
616
+ Figure 6-(A) shows the task-sequencing simulator used
617
+ with a demonstrated sequence by a human. Instead of a
618
+ configured sequence as in the previous training experiments,
619
+ the sequence is automatically generated through demonstra-
620
+ tion decomposition using the method described in [20]. The
621
+ same concept models from the training experiments are used
622
+ with the policy-update being disabled (the simulator is not
623
+ connected to any training algorithm and instead uses a fixed
624
+ learned policy without updates).
625
+
626
+ [Fig. 5 diagram labels: (A) task-sequencing simulator with a configured training sequence (CM Grasp, CM Pick), Concept Interface, Environment Engine Pipeline, state/action loop, training algorithms; (B) connect learned policy to the real robot; (C) train policy on a different robot hand; (D) configure to a different training scenario, a training sequence for door-opening (CM Grasp, CM Open).]
+ Fig. 5. Results of the task-sequencing simulator when used for learning.
653
+ [Fig. 6 diagram labels: (A) task-sequencing simulator with a demonstrated execution sequence (CM Grasp, CM Pick, CM Bring, CM Place, CM Release), Concept Interface, Environment Engine Pipeline, state/action loop; (B) simulation (physics, rendering); (C) real; (D) another scenario.]
+ Fig. 6. Results of the task-sequencing simulator when used for execution.
677
+ Figure 6-(B) shows a simulated execution of the demon-
678
+ strated sequence. The first row shows the outputs of the
679
+ physics engine and the second row shows the outputs of
680
+ the rendering engine. As both training and execution run
681
+ on the same system, the learned policy can easily be used
682
+ as a simulation-for-execution. The learned policy is already
683
+ a building block that can be combined with other tasks to
684
+ generate an application such as “pick up a cup from the upper
685
+ shelf and re-place it to the bottom shelf.”
686
+ Figure 6-(C) shows a real robot execution of the demon-
687
+ strated sequence by switching the engines in the environment
688
+ engine pipeline to connect with ROS. This shows how the
689
+ simulator can go from the simulated robot execution to the
690
+ real robot execution by using the same policy connections
691
+ but by changing the engines in which the states are obtained,
692
+ and the actions are performed against. Usually, going from
693
+ simulation to real introduces a sim-to-real gap. However,
694
+ only part of the scenario sequence uses a learned policy and
695
+ due to the careful design of the concept models to divide
696
+ learning observable dynamics (necessary goal states) from
697
+ learning hidden dynamics (sufficient goal states), no such
698
+ gap was encountered.
699
+ Figure 6-(D) shows an execution of a different sequence
700
+ “pick up a cup from the table and throw it in the trash.”
701
+ This scenario uses the same concept models and only differs
702
+ in the demonstrated input, showing how using the simulator
703
+ and concept model descriptions enable reusing the learned
704
+ policies for different execution scenarios. If a policy was
705
+ learned against a full “pick-and-place” scenario, the policy
706
+ would not easily scale to the “pick-and-throw” scenario as
707
+ the problem dynamics are different.
708
+
709
+ VII. CONCLUSIONS
710
+ This article introduced the task-sequencing simulator
711
+ which bridges simulation-for-learning to simulation-for-
712
+ execution. The simulation scenario for learning is created us-
713
+ ing a sequence of tasks. This way the simulation-for-learning
714
+ has the same structure as the simulation-for-execution. At
715
+ its core, the simulator uses a concept model which enables
716
+ sequencing mixed programmed, trained, and under-training
717
+ building blocks. While the simulator has a large advantage
718
+ in terms of integrated system development, the simulator
719
+ also provides new directions for simulation in execution and
720
+ simulation in learning.
721
+ From an execution perspective, the simulator allows com-
722
+ posing a task-sequence using both programmed and trained
723
+ tasks. Unlike programmed-only sequences, the advantage of
724
+ mixing trained blocks is that some of the tasks can contain
725
+ uncertainty and the goal state of a task can be described using
726
+ implicit system parameters (the goal state does not have to
727
+ be obtained directly from the real robot). The key is that
+ whether the observed state and selected actions satisfy the
729
+ goal state is learned as a termination signal through training.
730
+ From a learning perspective, the simulator and concept
731
+ model design have the following advantages: First, the sim-
732
+ ulation is reusable and easily applicable to slight changes
733
+ in the scenario. A policy for a different end-effector can
734
+ be learned by just changing the actor configurations in the
735
+ concept model. A policy can be optimized for different
736
+ scenarios by just changing the subsequent task in the suffi-
737
+ cient goal state. Second, defining the learning problem using
738
+ the concept model design enables a hierarchical learning-
739
+ structure as well as a structure for reducing sim-to-real gaps.
740
+ Any state parameters that do not have a large gap when
741
+ observed with the real robot are used for defining the necessary
742
+ goal state, whereas any state parameters that have a large gap
743
+ when observed with the real robot form the sufficient goal state
744
+ (implicitly learned in simulation but no need to be observed
745
+ with the real robot). This type of formulation is possible
746
+ as only the parts with uncertainty are being learned instead
747
+ of learning the entire scenario sequence. Following this
748
+ structured formulation has allowed going from simulation
749
+ to real without any extra real-world data collection and
750
+ achieving a reusable policy applicable to different execution
751
+ scenarios.
752
+ ACKNOWLEDGMENT
753
+ The authors thank Brice Chung’s team, Aydan Aksoylar
754
+ and Kartavya Neema for their help in the reward designs and
755
+ training of the concept models used in the experiments.
756
+ REFERENCES
+ [1] Matthias Plappert, Marcin Andrychowicz, Alex Ray, Bob McGrew, Bowen Baker, Glenn Powell, Jonas Schneider, Josh Tobin, Maciek Chociej, Peter Welinder, et al. Multi-goal reinforcement learning: Challenging robotics environments and request for research. arXiv preprint arXiv:1802.09464, 2018.
+ [2] Linxi Fan, Yuke Zhu, Jiren Zhu, Zihua Liu, Orien Zeng, Anchit Gupta, Joan Creus-Costa, Silvio Savarese, and Li Fei-Fei. Surreal: Open-source reinforcement learning framework and robot manipulation benchmark. In Conference on Robot Learning, pages 767–782. PMLR, 2018.
+ [3] Jack Collins, Shelvin Chand, Anthony Vanderkop, and David Howard. A review of physics simulators for robotic applications. IEEE Access, 9:51416–51431, 2021.
+ [4] Wenshuai Zhao, Jorge Peña Queralta, and Tomi Westerlund. Sim-to-real transfer in deep reinforcement learning for robotics: a survey. In 2020 IEEE Symposium Series on Computational Intelligence (SSCI), pages 737–744. IEEE, 2020.
+ [5] Nathan Koenig and Andrew Howard. Design and use paradigms for Gazebo, an open-source multi-robot simulator. In 2004 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) (IEEE Cat. No. 04CH37566), volume 3, pages 2149–2154. IEEE, 2004.
+ [6] Emanuel Todorov, Tom Erez, and Yuval Tassa. MuJoCo: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 5026–5033. IEEE, 2012.
+ [7] Eric Rohmer, Surya P. N. Singh, and Marc Freese. V-REP: A versatile and scalable robot simulation framework. In 2013 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 1321–1326. IEEE, 2013.
+ [8] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. CARLA: An open urban driving simulator. In Conference on Robot Learning, pages 1–16. PMLR, 2017.
+ [9] Shital Shah, Debadeepta Dey, Chris Lovett, and Ashish Kapoor. AirSim: High-fidelity visual and physical simulation for autonomous vehicles. In Field and Service Robotics, pages 621–635. Springer, 2018.
+ [10] Olivier Michel. Cyberbotics Ltd. Webots™: Professional mobile robot simulation. International Journal of Advanced Robotic Systems, 1(1):5, 2004.
+ [11] Stephen James, Marc Freese, and Andrew J. Davison. PyRep: Bringing V-REP to deep robot learning. arXiv preprint arXiv:1906.11176, 2019.
+ [12] Manos Kirtas, Konstantinos Tsampazis, Nikolaos Passalis, and Anastasios Tefas. Deepbots: A Webots-based deep reinforcement learning framework for robotics. In IFIP International Conference on Artificial Intelligence Applications and Innovations, pages 64–75. Springer, 2020.
+ [13] Edward J. Haug. Computer Aided Kinematics and Dynamics of Mechanical Systems. Vol. 1: Basic Methods. Allyn & Bacon, Inc., 1989.
+ [14] David E. Stewart and Jeffrey C. Trinkle. An implicit time-stepping scheme for rigid body dynamics with inelastic collisions and Coulomb friction. International Journal for Numerical Methods in Engineering, 39(15):2673–2691, 1996.
+ [15] Mihai Anitescu and Florian A. Potra. Formulating dynamic multi-rigid-body contact problems with friction as solvable linear complementarity problems. Nonlinear Dynamics, 14(3):231–247, 1997.
+ [16] James T. Kajiya. The rendering equation. In Proceedings of the 13th Annual Conference on Computer Graphics and Interactive Techniques, pages 143–150, 1986.
+ [17] Katsushi Ikeuchi, Naoki Wake, Riku Arakawa, Kazuhiro Sasabuchi, and Jun Takamatsu. Semantic constraints to represent common sense required in household actions for multi-modal learning-from-observation robot. arXiv preprint arXiv:2103.02201, 2021.
+ [18] M. S. Ohwovoriole and B. Roth. An extension of screw theory. 1981.
+ [19] Kazuhiro Sasabuchi, Naoki Wake, and Katsushi Ikeuchi. Task-oriented motion mapping on robots of various configuration using body role division. IEEE Robotics and Automation Letters, 6(2):413–420, 2020.
+ [20] Naoki Wake, Iori Yanokura, Kazuhiro Sasabuchi, and Katsushi Ikeuchi. Verbal focus-of-attention system for learning-from-observation. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 10377–10384. IEEE, 2021.
+
idAzT4oBgHgl3EQfbPxy/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,350 @@
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf,len=349
+ page_content='Task-sequencing Simulator: Integrated Machine Learning to Execution Simulation for Robot Manipulation Kazuhiro Sasabuchi1, Daichi Saito1, Atsushi Kanehira1, Naoki Wake1, Jun Takamatsu1, Katsushi Ikeuchi1 Abstract— A task-sequencing simulator in robotics manipulation to integrate simulation-for-learning and simulation-for-execution is introduced.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Unlike existing machine-learning simulation where a non-decomposed simulation is used to simulate a training scenario, the task-sequencing simulator runs a composed simulation using building blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' This way, the simulation-for-learning is structured similarly to a multi-step simulation-for-execution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' To compose both learning and execution scenarios, a unified trainable-and-composable description of blocks called a concept model is proposed and used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Using the simulator design and concept models, a reusable simulator for learning different tasks and a common-ground system for learning-to-execution and simulation-to-real are achieved and shown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' INTRODUCTION Simulators are important in robotics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Compared to the real world, simulators can run endlessly, safely, remove stochasticity, and provide ground truth data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' One direction for using simulators in robotics is to help check collisions and safe executions before real robot executions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Another direction is using simulators as a tool for machine learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Simulators that suit either one of the above directions are available today, however, it is desirable to have a simulator which fulfills both purposes to better integrate learning and execution, especially in the multi-step manipulation domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' In multi-step manipulation, a series of tasks which occur sequentially must be simulated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' An example of a sequential series of tasks is bringing an object, which is composed of tasks: grasp, pick, bring.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' An execution simulator must be able to trigger these different tasks and connect them into a sequenced simulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' When the simulator is used for checking robot executions, the simulation is a matter of combining programmed or trained building blocks (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=', run “grasp” then “pick” then “bring”).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' In contrast, machine learning simulators often ignore the task-sequence composition and are structured to train a specific problem or benchmark (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=', a non-decomposed “pick-and-place” simulation)[1][2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' This structural difference causes a gap between simulation-for-execution and simulation-for-learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The learned results become specific to the trained scenario and contradict the simulation-for-execution where scenarios are non-fixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Instead, a machine learning simulator could be designed similar to an execution simulator.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The “pick-and-place” scenario can be decomposed into a sequenced simulation of “grasp then pick then bring then place then release.” The difference compared to the execution simulator is that' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Fig. 1. The proposed task-sequencing simulator which enables scenario composition for both learning and execution in robotics manipulation (layers: the Concept Interface over concept models such as CM Grasp, CM Open, and CM Release; the Environment Engine Pipeline with physics, kinematics, rendering, and post-process engines).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' some of these blocks are “under-training” and are updated as data is collected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Once the update has finished, the trained block can be combined and reused for a different scenario, thus, the simulation-for-learning can directly transition to the simulation-for-execution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' In addition, such design enables learning new manipulation skills on top of programmed or prior-trained building blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' For example, a “grasp” could be trained using the sequence “grasp then pick,” where “pick” is a programmed task to provide a supervised signal (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=', teach that the “grasp” was successful if the “pick” was successful).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' This article introduces a task-sequencing simulator structure which enables integrated learning-to-execution simulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' At its core, the simulator uses a unified block design called the “concept model,” which is proposed within this article and defines the necessary descriptions for training a task, collecting trained tasks, and running the tasks to compose a sequence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The rest of the article is outlined as below: Section II provides a background on existing robotic simulators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Section III explains the overall simulator structure for achieving machine learning to robot execution simulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Section IV explains the concept model core component of the simulator and Section V provides some detailed example implementations of the model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Section VI shows the capabilities of the simulator in machine-learning-to-robot-execution followed by conclusions in Section VII.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' BACKGROUND While there are many existing simulators for robotics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' existing simulators may not achieve the integrated learning-to-execution multi-step manipulation purpose for one of the following reasons: 1) the simulator targets a different domain other than manipulation,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' 2) the simulator can be used for manipulation but misses a capability in simulation-for-learning,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' 3) the simulator can be used for manipulation but misses a capability in simulation-for-execution,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' 4) the simulator can be used for manipulation and both learning-and-execution purposes but not specifically for learning-to-execution purposes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Popular robotics simulators[3][4] include Gazebo[5], MuJoCo[6], CoppeliaSim[7], CARLA[8], AirSim[9], and Webots[10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Gazebo has its advantage in its capability to simulate executions using ROS-integrated sensors and actuators but is not the best choice when it comes to data collection and machine learning due to its slow simulation performance and inconsistency in physics simulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Thus, Gazebo falls into the second category.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Engines like MuJoCo, on the other hand, are suitable for stable physics simulation in machine learning but miss some robotics simulation capabilities such as inverse-kinematics and visual feedback (realistic rendering).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The focus is on physics simulation rather than an integrated simulator for robot executions, therefore falls into the third category.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' CARLA and AirSim mainly target automobiles such as drones and cars, therefore miss some important features such as kinematics required for manipulation, and fall into the first category.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' CoppeliaSim is an integrated simulator with a kinematics and physics engine, and the PyRep toolkit[11] can be used with the simulator for machine learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Webots is also an integrated simulator and frameworks such as Deepbots[12] help the simulator to be used for machine learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The machine learning features of these simulators are external features that have been developed within the community.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' While it is possible to use these simulators for both execution and learning purposes, they have not been designed for integrated learning-to-execution but rather for one-or-the-other purpose.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' That is, these simulators are not designed to connect learning-and-execution; rather, learning and execution are separate use cases where one uses a community-provided wrapper for machine learning, and the other uses the integrated features to simulate a robotic system execution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Compared to the existing simulators, the task-sequencing simulator was designed to connect simulation-for-learning and simulation-for-execution. The simulator uses a concept model which enables composition of pre-trained, programmed, or trained tasks, which is a powerful feature for going from machine learning to real robot execution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=', such as plugging-in to machine learning platforms but then connecting to execute on ROS).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' More importantly, tied-integration allows not only training using pre-sequent and post-sequent task executions, but also collecting reusable execution modules through training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' TASK-SEQUENCING SIMULATOR OVERVIEW The task-sequencing simulator has two layers: the Concept Interface for “action decision” and the Environment Engine Pipeline for “state observing” (Figure 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' However, unlike a typical learning simulator, where a specific problem has a non-decomposable structure and the action decision is a single policy being updated as data is collected for the problem, the task-sequencing simulator adds an abstraction to this action decision so that the problem is composed of a sequence of tasks (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=', switches between a collection of tasks, where each task runs an individual policy). This way, a learned task policy can become part of a collection of policies for execution once the training has finished.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Further details of each layer are described below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' concept interface At each simulation time-step, a robot decides the next action depending on the current state of the world.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' This decision is referred to as a policy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' When the relation between the state, action, and next state (system dynamics) is completely known, this policy can be directly programmed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' When the system dynamics are unknown, either the learning of the policy or system dynamics is required through data collection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Data collection is efficient if collected only for the unknown dynamics and if known dynamics are directly computed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Therefore, it is preferable to break down a robot’s execution to a series of tasks, where each task executes its own policy optimal for the system dynamics the task is covering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' In addition, breaking down a robot’s execution increases the reusability of each task policy and allows composing different execution scenarios from the task building blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The Concept Interface layer chooses and switches between the tasks for a training or execution scenario assuming (1) the series of tasks to simulate is known (ways for knowing are shown in the experiments), and (2) each task policy can indicate when the task has been completed (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=', has a learned or programmed completion signal).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' During simulation-for-execution, the Concept Interface layer chooses the corresponding task in a sequence and switches to the next task once the current policy returns a task completion signal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The simulation-for-learning can be conducted in a similar way, except that the policy’s output of the completion signal is evaluated (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=', by the success of a subsequent task).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' This interchangeable structure enables an integrated learning-to-execution simulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' To learn the termination signal under an arbitrary training sequence, the tasks share a unified design called a “concept model,” which is further explained in the later sections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
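+ A minimal sketch of this switching behavior, assuming hypothetical task objects whose policy returns an action and a completion flag (the names below are illustrative, not the simulator's actual API):
+ class ConceptInterface:
+     def __init__(self, tasks):
+         self.tasks = tasks  # ordered collection of programmed/trained task blocks
+     def run(self, env):
+         state = env.observe()
+         for task in self.tasks:
+             done = False
+             while not done:  # each task runs its own policy until completion
+                 action, done = task.policy(state)  # done: learned or programmed signal
+                 state = env.step(action)
+         return state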
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' environment engine pipeline At a time-step control scale, a robot behaves under a sense-plan-act with a physical embodiment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Thus, simulation in robotics requires three important engines: kinematics, physics (contact dynamics), and rendering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The role of the kinematics engine is to do the simulation between the robot’s action plan and the movement of the actual body.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The role of the physics engine is to do the simulation between the robot’s body and the environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The role of the rendering engine is to do the simulation between the environment and the robot’s sensing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' In more technical terms, the kinematics engine solves the mapping between cartesian space and configuration space, the physics engine solves the differential algebraic equation[13] using techniques such as velocity-impulse linear complementarity-based time-stepping methods[14][15], the rendering engine solves the rendering equation[16] about lighting paths for pixel color generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' While rendering engines simulate the robot’s sensing by generating images, there is a gap between sensing and perceiving (extracting meaningful states from the generated images).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Rather than directly learning the sensing-to-planning, sometimes it is more efficient to perceive-before-planning and extract visual features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Moreover, in robotics it is important to combine both visual and force feedback;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' the visual features help compress the feedback so that the vision and force have an aligned state dimension.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Thus, the proposed simulator adds a fourth “post-process engine” in conjunction with the rendering engine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' These different-role engines are triggered in an ordered pipeline to calculate the current state of the simulation world.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Often, simulators package specific engines to produce a single simulation world, but in general, these engines could run separately and combine/orchestrate multiple simulation worlds to produce better simulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' For example, ROS MoveIt could be used for accurate inverse kinematics simulation, PyBullet for reproducible physics simulation, and the Unreal Engine for photo-realistic ray-traced rendering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Combining different engines is possible as long as each engine is able to load the same models of the robot/objects and is able to share the robot and object states among each engine (which can be done using TCP connections etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' In addition, instead of using simulation engines, it is also possible to connect “real” engines which replace simulated physics with the real robot’s torque sensors and simulated rendering with real images from the robot’s camera.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
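+ A minimal sketch of this ordered pipeline, assuming hypothetical engine objects that share robot/object states (each stage could wrap, for example, an IK solver, a physics engine, a renderer, or a perception post-process):
+ def pipeline_step(state, action, engines):
+     # engines = [kinematics, physics, rendering, post_process], triggered in order
+     for engine in engines:
+         state = engine.update(state, action)  # each stage refines the world state
+     return state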
+ page_content=' IV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' CONCEPT MODELS A single task block operates under some system dynamics and achieves a goal state from an initial state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Thus, the details of a task can be described using the actors of the system, an initial state, a goal (end) state, and the parameters of the system dynamics (Figure 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' This kind of task description can be referred to as a “task model”[17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' This description is enough for executing a task if the system dynamics are completely known.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The initial state is usually the end state of the previous task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' However, when the dynamics are not fully known, learning is required, and during learning, the initial state must be randomized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Instead of fully describing the task, a task can be described using actor configurations, an initial state, a necessary goal state that is described from observable system states, a sufficient goal state that is described from non-observable system states, and the partially known parameters of the system dynamics (Figure 3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Fig. 2. An illustration of a task model (initial state, programmed-policy action, defined goal state; actors and parameters completely known and obtained before execution).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Fig. 3. An illustration of a concept model (initial state, policy action, necessary and sufficient goal states; a set of configured actor settings; parameters partially known, randomized at training and estimated at execution).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' This kind of task description will be referred to as a “concept model” which, compared to the task model, may not be concrete enough for execution, but describes the concepts of the task to learn the execution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' In the special case where the system dynamics are fully known and the task is programmable, the descriptions of the concept model are identical to the task model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' By providing a structured description format, the Concept Interface layer can access these blocks interchangeably within a training sequence as the structures are the same and only differ in the details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
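+ A sketch of this description format as a data structure; the field names below are illustrative assumptions, not the paper's exact schema:
+ from dataclasses import dataclass, field
+ from typing import Any, Callable, Optional
+ 
+ @dataclass
+ class ConceptModel:
+     actors: list                       # configurable set: robot, objects, environment
+     initial_state: Any                 # end state of the previous task, or sampled from actors
+     necessary_goal: Callable           # cost over observable system states
+     sufficient_goal: Callable          # cost over non-observable states (training only)
+     parameters: dict = field(default_factory=dict)   # partially known dynamics parameters
+     policy: Optional[Callable] = None  # programmed, trained, or under-training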
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' concept model usage in learning The concept model descriptions are used to learn the task completion signal as well as the actions to handle the unknown system dynamics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The necessary goal state and sufficient goal state descriptions are used to evaluate whether the task completion signal and actions are appropriate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The evaluation is done by minimizing the cost of the current states to the goal states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Note that the cost to the necessary goal state is evaluated after every action decision, whereas the cost to the sufficient goal state is only evaluated once a task completion signal is chosen during training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The actor configurations describe the possible environments (the world including the robot and any manipulating target) for training the task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' If there are no pre-sequent tasks involved for training, then one of the actor configurations is used to define the initial state of the tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Otherwise, the end state of the previous task is the initial state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Unlike the initial state, the actor configuration is independent from the states of the previous task, thus is configurable and can be used for randomizing the states for training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
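+ Following this timing, a training reward could be assembled as in the sketch below (a hypothetical helper, reusing the ConceptModel sketch above): the necessary-goal cost is charged after every action decision, while the sufficient-goal cost is charged only once a completion signal is chosen.
+ def training_reward(model, state, done):
+     r = -model.necessary_goal(state)       # evaluated after every action decision
+     if done:
+         r -= model.sufficient_goal(state)  # evaluated once, at the chosen termination
+     return r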
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' concept model usage in execution When the dynamics are fully known, the concept model acts the same as the task model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The state of the task changes from the initial state using actions from a programmed policy based on the system dynamics and actors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The initial state of the task during execution is the end state of the previous task and the task ends once the goal state is achieved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' When the dynamics are not fully known, the observable system states of the task change using a learned policy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Similar to the programmed case, the initial state is the observable states at the end of the previous task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' However, since part of the goal state is non-observable, the end of the task cannot be identified just with the model descriptions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Instead, the learned task completion signal from the concept model descriptions is used to identify the end of the task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' MODEL IMPLEMENTATIONS By following the concept model structure, a task is implemented in a way that can be trained but then collected as a building block for execution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' In this article, the screw theory[18] based separation of dynamics[17] is used to separate one task from another.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' That is, once the relation between the manipulation target and the robot’s end-effector is initialized, a task breaks or maintains a contact state between the target and the environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' By classifying the inequality equation patterns of contact points, this leads to seven pure translation tasks and seven pure rotation tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Figure 4 shows implementation examples of some of these tasks as concept models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The details of some of the examples in the figure are described below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Note that as the task classification only depends on the relation between the end-effector, manipulation target, and the environment, the movement of the arm (configuration space) can be ignored[19] and the task only focuses on the movement of the end-effector (cartesian space).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' grasping The grasp task initiates the relation between the manipulation-target and the robot’s end-effector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The actors are a target object, an environment (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=', table), and the end-effector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The initial state is where the target object is attached to the environment but not attached to the end-effector (including shape of the finger joints).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The goal state is where the target object is also attached to the end-effector in a way such that enough force is exerted for performing a subsequent task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The parameters are the distance between the target and end-effector as well as the approaching direction of the end-effector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' When details of the target object are completely known, a task model can be defined and programmed from the above details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' However, in the real world, there is uncertainty in the shape of the object and the distance to the target; distinguishing whether enough force is exerted is intractable due to the inaccuracy of real contact sensors or the lack of sensors to detect slipping; and a grasp failure due to finger-object collision during approach may occur if the policy is not carefully designed under the uncertainties of the object properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Fig. 4. Example concept models of eight different tasks in the screw-theory based classification (Grasp, Open, Pick, Bring, Place, Release, Pour, and Wipe concept models, each described by its actors, initial state, necessary/sufficient goal states, and parameters).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Instead,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' a set of actor configurations is defined as object shapes from a randomized range,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' a necessary goal state is defined as the end-effector to be in contact with the target object on an appropriate surface (which can be obtained on the real robot with the finger configurations and finger-torque sensors with a threshold to determine a binary contacted-or-not-contacted state),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' a sufficient goal state is defined as a successful performance of a subsequent task,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' the estimated distance is used for the parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The defined goal states are used to formulate the reward (cost-to-go) for learning the policy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Using this concept model, the approaching strategy (adjusted movement around the approaching direction of the end-effector) and the sufficient amount of “closing” of the fingers to perform a subsequent task are learned.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The learned policy chooses the sufficient amount based on the object shape, which can partially be inferred from the shape of the end-effector finger joints once touching the object.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The policy returns a termination signal once it reaches the sufficient amount of closing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
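+ An illustrative instantiation of this grasp description, reusing the ConceptModel sketch from earlier; all actor names, state keys, and costs below are hypothetical:
+ grasp = ConceptModel(
+     actors=["end_effector", "table", "objects_with_randomized_shapes"],
+     initial_state=None,  # sampled from the actor configuration at episode start
+     necessary_goal=lambda s: 0.0 if s["finger_contact"] else 1.0,        # observable on the real robot
+     sufficient_goal=lambda s: 0.0 if s["next_task_succeeded"] else 1.0,  # e.g., did the pick succeed
+     parameters={"approach_distance": None, "approach_direction": None},  # estimated, not measured
+ )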
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' door-opening The door-opening task is a one degree-of-freedom pure rotation task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The actors are a target object (the door), an environment (the hinge), and the end-effector (attached to the door handle).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The initial state is where the end-effector and target object are at an attached state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The goal state is where the target object has moved to some desired orientation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' The parameters are the rotation radius, the rotation axis center, and the rotation axis direction defined by the target and environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' When details of the target and environment are completely known, a task model can be defined and programmed from the above details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' However, in the real world, there is uncertainty in the environment parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' Instead, a set of actor configurations randomizing the radius, a necessary goal state that moves the object along the environment constraint at each time-step (which can be obtained on the real robot by using a force sensor on the wrist and checking against a maximum-stress threshold), a sufficient goal state that ensures the door has reached the desired orientation, and estimation of the parameters are used to describe the concept model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
+ page_content=' By using an end-effector with only force-sensor feedback on the wrist, this model enables learning a policy which updates parameter estimations at each time-step, and then generates a hand motion based on the updated parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
204
+ page_content=' The policy returns a termination signal once inferred that the desired orientation has been reached.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idAzT4oBgHgl3EQfbPxy/content/2301.01382v1.pdf'}
205
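+ To make the structure of such a concept model concrete, the following is a minimal Python sketch of the door-opening concept model described above. All class and method names (DoorOpeningConceptModel, update_estimate, next_hand_motion, terminated), the maximum-stress threshold, and the circular-motion update are our own illustrative assumptions, not the paper's actual implementation.
+ import numpy as np
+
+ class DoorOpeningConceptModel:
+     """Illustrative concept model: estimate rotation parameters from
+     wrist force feedback and emit hand motions until the door reaches
+     the desired orientation (hypothetical sketch, not the paper's code)."""
+
+     def __init__(self, desired_angle, max_stress=10.0):
+         self.desired_angle = desired_angle   # sufficient goal (radians)
+         self.max_stress = max_stress         # necessary-goal force threshold
+         self.radius_estimate = 0.5           # initialized with uncertainty
+         self.angle = 0.0
+
+     def update_estimate(self, wrist_force):
+         # Shrink the radius estimate when the wrist senses excess stress,
+         # i.e. when the commanded arc does not match the true hinge geometry.
+         if np.linalg.norm(wrist_force) > self.max_stress:
+             self.radius_estimate *= 0.95
+
+     def next_hand_motion(self, step=0.05):
+         # Necessary goal state: move the handle along the (estimated)
+         # circular constraint by a small angular increment.
+         self.angle += step
+         return self.radius_estimate * np.array(
+             [np.cos(self.angle), np.sin(self.angle), 0.0])
+
+     def terminated(self):
+         # Termination signal once the desired orientation is inferred.
+         return self.angle >= self.desired_angle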
+ C. bringing
+ The bringing task is a six degrees-of-freedom translation and rotation task. The actors are the target object and the end-effector. The initial state is where the end-effector and target object are attached. The goal state is where the target object has moved to some relative position. The parameters are the moving direction and distance. Since there are no uncertainties in the target or environment, the parameters can be manually specified and the goal can be derived directly from the parameters. Thus, the concept model is identical to the task model and can be programmed.
+ VI. EXPERIMENTS
+ Experiments were conducted using the concept model implementations shown in the previous section and the developed task-sequencing simulator. For the learning experiments, the series of tasks to simulate was predefined. These experiments were performed to show the effectiveness of the simulator and its reusability for training different tasks. By running the pre-sequent tasks of the task-to-train at the start of an episode, and by running the subsequent tasks at reward return, the simulator is compatible with common reinforcement learning platforms and can use off-the-shelf learning algorithms. For the experiments, the simulator was connected to the Bonsai platform and used the PPO algorithm. For the execution experiments, the series of tasks was obtained from human demonstrations and the actions were generated using the learned task policies. These experiments were performed to show the effectiveness of the simulator for execution simulations (execution of different composed scenarios). Note that the exact same simulator and task blocks used for training were used for this experiment, showing the simulator's capability to transition from simulation-for-learning to simulation-for-execution. For simulation, states were obtained by plugging the PyBullet engine into the environment engine pipeline for the physics engine role and the Unreal Engine for rendering. For the real robot, states were obtained by plugging ROS (connected via roslibpy) into the environment engine pipeline. For the arm kinematics, the ROS MoveIt package was used.
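+ The compatibility with common reinforcement learning platforms can be pictured as a gym-style wrapper in which pre-sequent tasks run at episode reset and subsequent tasks run when the reward is evaluated. The sketch below is a hypothetical illustration under these assumptions; TaskSequencingEnv and the task/sim method names are not the paper's actual API.
+ import gym
+
+ class TaskSequencingEnv(gym.Env):
+     """Hypothetical gym-style wrapper: pre-sequent tasks run at episode
+     start, subsequent tasks run when the reward is computed."""
+
+     def __init__(self, pre_tasks, task_to_train, post_tasks, sim):
+         self.pre_tasks, self.task, self.post_tasks = pre_tasks, task_to_train, post_tasks
+         self.sim = sim  # environment engine pipeline (physics + rendering)
+
+     def reset(self):
+         state = self.sim.reset()
+         for t in self.pre_tasks:          # e.g. a trained "grasp" before "open"
+             state = t.run(self.sim, state)
+         return state
+
+     def step(self, action):
+         state, done = self.task.apply(self.sim, action)
+         reward = 0.0
+         if done:                          # sufficient goal: can the subsequent
+             for t in self.post_tasks:     # task (e.g. "pick") still succeed?
+                 state = t.run(self.sim, state)
+             reward = float(self.sim.subsequent_tasks_succeeded())
+         return state, reward, done, {}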
+ A. training
+ Figure 5-(A) shows the task-sequencing simulator configurations for running the grasp training. A configured training sequence "grasp then pick" is passed to the simulator. The "grasp" task is the task-to-train, and a programmed "pick" is used as the subsequent task for evaluating the sufficient goal state. Figure 5-(B) shows the trained grasp results performed on a real robot. The concept model was designed with the object shape parameters as unknown. Regardless of such uncertainty, the learned policy successfully grasps differently shaped objects including but not limited to a box, a cylindrical cup, an oval rice pack, and a diamond-shaped candy box. Figure 5-(C) shows a learned grasp for a different robot hand, which was trained using the same simulator and concept models but with a different actor configuration (end-effector) setting. The results show the reusability of the simulator for training different robots with different mechanics (a hand with multiple fingers and a gripper with limited degrees of freedom). Figure 5-(D) shows that by changing the configured training sequence to "grasp then open," the door-opening task is trained using the same simulator. The "open" task is the task-to-train, and the trained "grasp" is reused as the pre-sequent task for initiating the relation between the end-effector and the target door. Regardless of uncertainty in the rotation radius, center, and axis direction, the real robot performed the door-opening using the learned policy. Although the policy was trained only on simulated data, it is directly applicable to the real robot, as the sufficient goal state does not require observability on the real robot and because the policy's action decisions rely only on states with very small sim-to-real gaps.
+ B. execution
+ Figure 6-(A) shows the task-sequencing simulator used with a sequence demonstrated by a human. Instead of a configured sequence as in the previous training experiments, the sequence is automatically generated through demonstration decomposition using the method described in [20]. The same concept models from the training experiments are used with the policy update disabled (the simulator is not connected to any training algorithm and instead uses a fixed learned policy without updates).
+ Fig. 5. Results of the task-sequencing simulator when used for learning. [Diagram: concept interface and environment engine pipeline exchanging state and action; (A) configured training sequence "CM Grasp then CM Pick" connected to training algorithms, (B) learned policy connected to the real robot, (C) policy trained on a different robot hand, (D) training sequence "CM Grasp then CM Open" configured for the door-opening scenario.]
+ Fig. 6. Results of the task-sequencing simulator when used for execution. [Diagram: demonstrated execution sequence "CM Grasp, CM Pick, CM Bring, CM Bring, CM Place, CM Release"; (A) simulator configuration, (B) simulated execution with physics and rendering, (C) real robot execution, (D) another scenario.]
+ Figure 6-(B) shows a simulated execution of the demonstrated sequence. The first row shows the outputs of the physics engine and the second row shows the outputs of the rendering engine. As both training and execution run on the same system, the learned policy can easily be used in a simulation-for-execution. The learned policy is already a building block that can be combined with other tasks to generate an application such as "pick up a cup from the upper shelf and re-place it to the bottom shelf." Figure 6-(C) shows a real robot execution of the demonstrated sequence by switching the engines in the environment engine pipeline to connect with ROS. This shows how the simulator can go from simulated robot execution to real robot execution by using the same policy connections but changing the engines from which the states are obtained and against which the actions are performed. Usually, going from simulation to real introduces a sim-to-real gap. However, only part of the scenario sequence uses a learned policy, and due to the careful design of the concept models to divide learning observable dynamics (necessary goal states) from learning hidden dynamics (sufficient goal states), no such gap was encountered. Figure 6-(D) shows an execution of a different sequence, "pick up a cup from the table and throw it in the trash." This scenario uses the same concept models and only differs in the demonstrated input, showing how using the simulator and concept model descriptions enables reusing the learned policies for different execution scenarios. If a policy were learned against a full "pick-and-place" scenario, the policy would not easily scale to the "pick-and-throw" scenario, as the problem dynamics are different.
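+ The engine-swapping idea, keeping the policy connections fixed while exchanging the backends that provide states and accept actions, might be sketched as follows. The backend classes and method names (PyBulletBackend, RosBackend, get_state, apply_action) are assumptions for illustration only.
+ class EnginePipeline:
+     """Hypothetical illustration of swapping engines behind one interface:
+     PyBullet for simulation-for-execution, ROS for the real robot."""
+
+     def __init__(self, backend):
+         self.backend = backend  # e.g. PyBulletBackend() or RosBackend()
+
+     def observe(self):
+         return self.backend.get_state()
+
+     def act(self, action):
+         self.backend.apply_action(action)
+
+ # Usage: the scenario code is unchanged when moving from sim to real.
+ # pipeline = EnginePipeline(PyBulletBackend())   # simulated execution
+ # pipeline = EnginePipeline(RosBackend())        # real robot via roslibpy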
+ VII. CONCLUSIONS
+ This article introduced the task-sequencing simulator, which bridges simulation-for-learning to simulation-for-execution. The simulation scenario for learning is created using a sequence of tasks. This way, the simulation-for-learning has the same structure as the simulation-for-execution. At its core, the simulator uses a concept model which enables sequencing mixed programmed, trained, and under-training building blocks. While the simulator has a large advantage in terms of integrated system development, it also provides new directions for simulation in execution and simulation in learning.
+ From an execution perspective, the simulator allows composing a task sequence using both programmed and trained tasks. Unlike programmed-only sequences, the advantage of mixing trained blocks is that some of the tasks can contain uncertainty, and the goal state of a task can be described using implicit system parameters (the goal state does not have to be obtained directly from the real robot). The key is that whether the observed state and selected actions satisfy the goal state is learned as a termination signal through training.
+ From a learning perspective, the simulator and concept model design have the following advantages. First, the simulation is reusable and easily applicable to slight changes in the scenario. A policy for a different end-effector can be learned by just changing the actor configurations in the concept model. A policy can be optimized for different scenarios by just changing the subsequent task in the sufficient goal state. Second, defining the learning problem using the concept model design enables a hierarchical learning structure as well as a structure for reducing sim-to-real gaps. Any state parameters that do not have a large gap when observed with the real robot are used for defining the necessary goal state, whereas any state parameters that have a large gap when observed with the real robot belong to the sufficient goal state (implicitly learned in simulation, with no need to be observed on the real robot). This type of formulation is possible because only the parts with uncertainty are being learned instead of learning the entire scenario sequence. Following this structured formulation has allowed going from simulation to real without any extra real-world data collection, and has achieved a reusable policy applicable to different execution scenarios.
+ ACKNOWLEDGMENT
+ The authors thank Brice Chung's team, Aydan Aksoylar, and Kartavya Neema for their help in the reward designs and training of the concept models used in the experiments.
+ REFERENCES
+ [1] Matthias Plappert, Marcin Andrychowicz, Alex Ray, Bob McGrew, Bowen Baker, Glenn Powell, Jonas Schneider, Josh Tobin, Maciek Chociej, Peter Welinder, et al. Multi-goal reinforcement learning: Challenging robotics environments and request for research. arXiv preprint arXiv:1802.09464, 2018.
+ [2] Linxi Fan, Yuke Zhu, Jiren Zhu, Zihua Liu, Orien Zeng, Anchit Gupta, Joan Creus-Costa, Silvio Savarese, and Li Fei-Fei. Surreal: Open-source reinforcement learning framework and robot manipulation benchmark. In Conference on Robot Learning, pages 767-782. PMLR, 2018.
+ [3] Jack Collins, Shelvin Chand, Anthony Vanderkop, and David Howard. A review of physics simulators for robotic applications. IEEE Access, 9:51416-51431, 2021.
+ [4] Wenshuai Zhao, Jorge Peña Queralta, and Tomi Westerlund. Sim-to-real transfer in deep reinforcement learning for robotics: a survey. In 2020 IEEE Symposium Series on Computational Intelligence (SSCI), pages 737-744. IEEE, 2020.
+ [5] Nathan Koenig and Andrew Howard. Design and use paradigms for Gazebo, an open-source multi-robot simulator. In 2004 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) (IEEE Cat. No. 04CH37566), volume 3, pages 2149-2154. IEEE, 2004.
+ [6] Emanuel Todorov, Tom Erez, and Yuval Tassa. MuJoCo: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 5026-5033. IEEE, 2012.
+ [7] Eric Rohmer, Surya P. N. Singh, and Marc Freese. V-REP: A versatile and scalable robot simulation framework. In 2013 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 1321-1326. IEEE, 2013.
+ [8] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. CARLA: An open urban driving simulator. In Conference on Robot Learning, pages 1-16. PMLR, 2017.
+ [9] Shital Shah, Debadeepta Dey, Chris Lovett, and Ashish Kapoor. AirSim: High-fidelity visual and physical simulation for autonomous vehicles. In Field and Service Robotics, pages 621-635. Springer, 2018.
+ [10] Olivier Michel. Cyberbotics Ltd. Webots: professional mobile robot simulation. International Journal of Advanced Robotic Systems, 1(1):5, 2004.
+ [11] Stephen James, Marc Freese, and Andrew J. Davison. PyRep: Bringing V-REP to deep robot learning. arXiv preprint arXiv:1906.11176, 2019.
+ [12] Manos Kirtas, Konstantinos Tsampazis, Nikolaos Passalis, and Anastasios Tefas. Deepbots: A Webots-based deep reinforcement learning framework for robotics. In IFIP International Conference on Artificial Intelligence Applications and Innovations, pages 64-75. Springer, 2020.
+ [13] Edward J. Haug. Computer Aided Kinematics and Dynamics of Mechanical Systems. Vol. 1: Basic Methods. Allyn & Bacon, Inc., 1989.
+ [14] David E. Stewart and Jeffrey C. Trinkle. An implicit time-stepping scheme for rigid body dynamics with inelastic collisions and Coulomb friction. International Journal for Numerical Methods in Engineering, 39(15):2673-2691, 1996.
+ [15] Mihai Anitescu and Florian A. Potra. Formulating dynamic multi-rigid-body contact problems with friction as solvable linear complementarity problems. Nonlinear Dynamics, 14(3):231-247, 1997.
+ [16] James T. Kajiya. The rendering equation. In Proceedings of the 13th Annual Conference on Computer Graphics and Interactive Techniques, pages 143-150, 1986.
+ [17] Katsushi Ikeuchi, Naoki Wake, Riku Arakawa, Kazuhiro Sasabuchi, and Jun Takamatsu. Semantic constraints to represent common sense required in household actions for multi-modal learning-from-observation robot. arXiv preprint arXiv:2103.02201, 2021.
+ [18] M. S. Ohwovoriole and B. Roth. An extension of screw theory. 1981.
+ [19] Kazuhiro Sasabuchi, Naoki Wake, and Katsushi Ikeuchi. Task-oriented motion mapping on robots of various configuration using body role division. IEEE Robotics and Automation Letters, 6(2):413-420, 2020.
+ [20] Naoki Wake, Iori Yanokura, Kazuhiro Sasabuchi, and Katsushi Ikeuchi. Verbal focus-of-attention system for learning-from-observation. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 10377-10384. IEEE, 2021.
itAzT4oBgHgl3EQfbPyr/content/tmp_files/2301.01383v1.pdf.txt ADDED
@@ -0,0 +1,1686 @@
+ How to get the most out of Twinned Regression Methods
+ Sebastian J. Wetzel
+ University of Waterloo, Waterloo, Ontario N2L 3G1, Canada
+ Perimeter Institute for Theoretical Physics, Waterloo, Ontario N2L 2Y5, Canada
+ Homes Plus Magazine Inc., Waterloo, Ontario N2V 2B1, Canada
+ Abstract. Twinned regression methods are designed to solve the dual problem to the original regression problem, predicting differences between regression targets rather than the targets themselves. A solution to the original regression problem can be obtained by ensembling predicted differences between the targets of an unknown data point and multiple known anchor data points. We explore different aspects of twinned regression methods: (1) we decompose the different steps in twinned regression algorithms and examine their contributions to the final performance, (2) we examine the intrinsic ensemble quality, (3) we combine twin neural network regression with k-nearest neighbor regression to design a more accurate and efficient regression method, and (4) we develop a simplified semi-supervised regression scheme.
+ Keywords: Artificial Neural Networks, k-Nearest Neighbors, Random Forests, Regression, Semi-Supervised Learning
+ arXiv:2301.01383v1 [cs.LG] 3 Jan 2023
+ 1. Introduction
+ Regression is one of the most general and common machine-learning tasks; practitioners in many different fields of science and industry rely on methods that help them make the most accurate and reliable predictions on new data points, inferred from a limited amount of training data. Conventional regression methods aim to infer the mapping of input features to one or multiple target variables. Twinned regression methods aim to solve the dual problem of predicting the difference between target values; a solution to the original regression problem can then be obtained by evaluating the predictions between a new unknown data point and multiple anchor data points. This process creates an ensemble of predictions from a single trained model, which tends to be more accurate than solving the original regression problem directly and opens up semi-supervised learning and uncertainty estimates at a very low cost [1, 2, 3]. However, the trade-off is poor scaling with large data sets, since the effective training data set size scales quadratically with the size of the original training data set.
+ Thus, these methods are beneficial in domains where data is either scarce or costly to obtain. This is the case, for example, in real estate, where markets differ from city to city and data becomes outdated quickly [4, 5, 6]. Another example stems from calculations in chemistry, where simulations of chemical systems based on quantum mechanics require an enormous amount of computational resources [7, 8].
+ This article is meant to be a practitioner's guide to using twinned regression methods that guides the reader through advantages and trade-offs and attempts to answer most questions that were left open in recent years.
+ The main question on our mind is why such a simple trick of solving the dual problem can yield a more accurate prediction than solving the regression problem directly. While we do not fully answer this question, we decompose twin neural network regression (TNNR) into different steps, each with the potential to enhance the performance over traditional algorithms. These include the increased effective data set size obtained through pairing training data points, or different ensembles of TNNR predictions. Further, by mapping extreme cases of TNNR to k-nearest neighbor (k-NN) regression and normal artificial neural networks (ANNs), we can observe a distinct performance behaviour of twinned regression methods, different from traditional regression.
+ Further, we are eager to improve the accuracy of twinned regression methods. For this purpose, we devise an improvement to TNNR based on the idea of weighting the predictions from different anchors. This leads us to combine k-nearest neighbors (k-NN) with TNNR into an even more accurate regression scheme.
+ The semi-supervised regression framework for TNNR invented in [2] is specifically tailored to neural network-based regression. It is based on enforcing consistency conditions on unknown data points through a modified loss function. At the end of this manuscript we examine a way to translate a simplified version of this semi-supervised learning scheme to the twinned version of random forests (RF) proposed in [3].
+ In some projects it is important to apply neural networks under strong memory constraints; this might be the case in small chips or autonomous systems [9, 10]. In these cases it would normally be very inefficient to store ensembles of machine learning models due to the increased number of parameters. In contrast, with twinned regression methods one only needs to store additional anchor data points.
+ 2. Prior Work
+ The pairwise comparison inherent to twinned regression methods is inspired by Siamese neural networks, which were devised to solve the similarity classification problem as it occurs in fingerprint recognition or signature verification [11, 12]. Siamese neural networks contain two identical neural networks with shared weights which project a pair of inputs into a latent space, on which the pairwise similarity is determined by the distance. Twinned regression methods also take a pair of inputs, but predict the difference between the labels [1].
+ Twin neural network regression [1] was invented as a regression method that solves the dual problem of predicting pairwise differences between the target values of pairs of input data points. Independently, the same idea has been developed for random forests [3]. This kind of regression framework has been shown to have several advantages: (1) it allows for a very efficient generation of ensemble predictions [1, 3]; typically, in methods that generate ensembles from training a single machine learning model, the predictions are strongly correlated [13, 14] since they can be deformed into each other through small perturbations, whereas in twinned regression methods ensemble members are separated by the distance of the input data points themselves. (2) Twinned regression methods tend to be more accurate than the underlying base algorithm on many data sets [1, 3]. (3) Consistency conditions allow for the formulation of uncertainty estimators in addition to the ensemble variance [1, 3], and (4) loops containing unlabelled data points can be supplied while training, hence turning the method into a semi-supervised regression algorithm [2]. Further, (5) the intrinsic uncertainty estimation lends itself to active learning [3].
+ A central contribution of this article is the combination of twin neural network regression and k-NN regression to increase the accuracy over standard twin neural network regression. Similarly, artificial neural networks have been employed in tandem with k-NN regression in different contexts before [15, 16, 17].
+ 3. Reformulation of the Regression Problem
+ A regression problem can be formulated as follows: Given a labelled training data set of n data points $X^{\mathrm{train}} = (x^{\mathrm{train}}_1, \ldots, x^{\mathrm{train}}_n)$ with their corresponding target values $Y^{\mathrm{train}} = (y^{\mathrm{train}}_1, \ldots, y^{\mathrm{train}}_n)$, we are tasked to find a function f such that the deviation between $f(x_i)$ and $y_i$ is minimized with respect to a predefined objective function for all data points $x_i$ on the data manifold. In this work, this function is the root mean square error $L_{\mathrm{RMSE}} = \sqrt{\sum_{i=1}^{n} (f(x_i) - y_i)^2}$. Unless stated otherwise, all performance measures are evaluated on unknown test data $(X^{\mathrm{test}}, Y^{\mathrm{test}})$.
+ Figure 1: Dual formulation of a regression problem: A traditional solution to a regression problem consists of finding an approximation to the function that maps a data point x to its target value f(x) = y. Twinned regression methods solve the dual problem of mapping a pair of inputs $x_1$ and $x_2$ to the difference between the target values $F(x_2, x_1) = y_2 - y_1$. The resulting function can then be employed as an estimator for the original regression problem $y_2 = F(x_2, x_1) + y_1$ given a labelled anchor point $(x_1, y_1)$. Twinned regression methods must satisfy loop consistency: predictions along each loop sum to zero: $F(x_1, x_2) + F(x_2, x_3) + F(x_3, x_1) = 0$.
+ Twinned regression methods aim to solve a reformulation of the original regression problem, which is visualized in Fig. 1. For each pair of data points $(x^{\mathrm{train}}_i, x^{\mathrm{train}}_j)$ we train a regression model to find a function F to predict the difference
+ $F(x_i, x_j) = y_i - y_j$ .    (1)
+ This function F can be used to construct a solution to the original regression problem via $y^{\mathrm{pred}}_i = F(x_i, x_j) + y_j$, where $(x_j, y_j)$ is an anchor whose target value is known. Every training data point $x^{\mathrm{train}}_j \in X^{\mathrm{train}}$ can be used as such an anchor. A more accurate estimate for the solution of the original regression problem is obtained by averaging over many differences between a fixed unknown data point and different anchor data points:
+ $y^{\mathrm{pred}}_i = \frac{1}{n} \sum_{j=1}^{n} \left( F(x_i, x^{\mathrm{train}}_j) + y^{\mathrm{train}}_j \right) = \frac{1}{n} \sum_{j=1}^{n} \left( \tfrac{1}{2} F(x_i, x^{\mathrm{train}}_j) - \tfrac{1}{2} F(x^{\mathrm{train}}_j, x_i) + y^{\mathrm{train}}_j \right)$ .    (2)
+ The increase in accuracy is based on averaging out the noise from different anchors and the reduction of the variance error via an ensemble of predictions. Previous works [1, 3] recommended using the whole training data set as anchors, hence creating an ensemble of difference predictions $y_i - y_j$ which is twice as large as the training set for every single prediction of $y_i$.
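+ As an illustration of Eqs. (1) and (2), the following is a minimal, self-contained Python sketch of a twinned regression workflow using scikit-learn. The choice of a random forest as the base learner for F, the inclusion of the difference feature (following the augmentation suggested in [3]), the toy data, and all variable names are our own illustrative assumptions, not the exact setup of the papers.
+ import numpy as np
+ from sklearn.ensemble import RandomForestRegressor
+
+ def make_pairs(X, y):
+     # Build the paired (dual) training set: features of both points plus
+     # their difference, with the label difference y_i - y_j as target.
+     i, j = np.meshgrid(np.arange(len(X)), np.arange(len(X)), indexing="ij")
+     i, j = i.ravel(), j.ravel()
+     return np.hstack([X[i], X[j], X[i] - X[j]]), y[i] - y[j]
+
+ # Toy data: y = sum of features plus noise.
+ rng = np.random.default_rng(0)
+ X = rng.normal(size=(100, 3))
+ y = X.sum(axis=1) + 0.1 * rng.normal(size=100)
+
+ F = RandomForestRegressor(n_estimators=100, random_state=0)
+ F.fit(*make_pairs(X, y))
+
+ def predict(x_new, X_anchor, y_anchor):
+     # Eq. (2): average the symmetrized difference predictions over anchors.
+     x_rep = np.tile(x_new, (len(X_anchor), 1))
+     fwd = F.predict(np.hstack([x_rep, X_anchor, x_rep - X_anchor]))
+     bwd = F.predict(np.hstack([X_anchor, x_rep, X_anchor - x_rep]))
+     return np.mean(0.5 * fwd - 0.5 * bwd + y_anchor)
+
+ print(predict(np.array([0.2, -0.1, 0.3]), X, y))  # anchor-ensemble prediction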
+ A major advantage of the dual formulation is the description via loops containing multiple data points, as can be seen in Fig. 1. In contrast to traditional regression, the results of twinned regression methods need to satisfy consistency conditions; for example, for any three data points $x_1, x_2, x_3$, summing up the predictions along a closed loop should yield zero: $F(x_1, x_2) + F(x_2, x_3) + F(x_3, x_1) = 0$. During inference, violations of these consistency conditions give rise to uncertainty estimates [1, 3]. Enforcing loop consistency on predictions involving unlabelled data points in the training phase is what makes twinned regression methods into semi-supervised regression algorithms.
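+ A sketch of how loop-consistency violations can serve as an uncertainty score at inference time is shown below; it reuses the hypothetical model F and pairing convention from the previous snippet and is our own illustration, not the exact estimator defined in [1, 3].
+ def loop_violation(F, x1, x2, x3):
+     # Sum of difference predictions along the closed loop x1 -> x2 -> x3 -> x1;
+     # for a perfectly consistent model this sum is exactly zero, so its
+     # magnitude can be read as an uncertainty signal.
+     def d(a, b):
+         feats = np.hstack([a, b, a - b]).reshape(1, -1)
+         return F.predict(feats)[0]
+     return d(x1, x2) + d(x2, x3) + d(x3, x1)
+
+ violations = [abs(loop_violation(F, X[0], X[k], X[k + 1])) for k in range(1, 20)]
+ print(np.mean(violations))  # larger values suggest less trustworthy predictions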
+ While neural networks are naturally good learners of linear functions, this is not the case for other algorithms like random forests. For this reason, [3] proposed to augment the input features by their difference: $(x_i, x_j) \to (x_i, x_j, x_i - x_j)$. One might argue that this improvement is similar to common data augmentation; however, this is a feature that traditional machine learning algorithms do not have access to, because it requires two different data points.
+ 4. Notes About Experiments
+ All experiments in this article are performed on the data sets outlined in Appendix A. Since only neural network-based methods scale favorably with the data set size, they use the full data sets, of which 70% are used for training, 10% as validation set, and 20% as test set. The details of the neural network architectures can be found in Appendix C. Random forests, and especially twinned random forests, scale poorly with the data set size; thus, only 100 training data points are chosen from the data sets and 100 data points comprise the test sets. Random forests do not need validation sets, since the hyperparameters are optimized via 5-fold cross-validation. In Appendix D one can find the details about our random forest implementations. All experiments are repeated for 25 random but fixed splits of training, test, and, if applicable, validation data.
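+ A minimal sketch of the evaluation protocol described above (70/10/20 splits, repeated over 25 fixed random seeds) might look as follows; the concrete seed values and the use of scikit-learn's train_test_split are our own assumptions.
+ from sklearn.model_selection import train_test_split
+
+ def splits(X, y, n_repeats=25):
+     # Yield 25 random-but-fixed 70/10/20 train/validation/test splits.
+     for seed in range(n_repeats):
+         X_tr, X_rest, y_tr, y_rest = train_test_split(
+             X, y, test_size=0.3, random_state=seed)
+         X_val, X_te, y_val, y_te = train_test_split(
+             X_rest, y_rest, test_size=2 / 3, random_state=seed)
+         yield X_tr, y_tr, X_val, y_val, X_te, y_te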
+ 5. Ensemble Performance
+ Twinned regression methods have been shown to produce accurate solutions to regression problems [1, 3], comparable to or better than other current state-of-the-art algorithms, at the cost of scaling poorly towards larger data sets. This naturally leads to the question of where the increased performance stems from. The reformulation of a regression problem into its dual problem of predicting differences between target values opens up several potential reasons for improved accuracy. These include increased effective training set size, internal ensembling of predictions (see explanation in Appendix B), or the nature of solving a different problem. In the following we examine these reasons at the example of TNNR; however, we assume the answers will also be valid for other baseline algorithms.
+ Let us start with discussing different kinds of ensembles and their effect on accuracy. Fig. 2 contains the results of several experiments examining the performance of different ensemble types of ANN regression and TNNR. For each data set, the baseline results are the solid blue horizontal line, which represents the test RMSE after applying standard full anchor TNNR, and the leftmost point of the orange line, which represents the results of applying a single ANN, confirming that TNNR almost always yields a lower RMSE than ANN regression.
+ Figure 2: Comparison of different ensembles of ANNs and TNNR, measured by RMSE vs. number of ensemble members for the dashed lines. Ensembles can be created either by training several models or by evaluating TNNR for different anchors. The blue solid line corresponds to training and evaluating one TNNR model on all possible training pairs and predicting the results with all possible anchors. The dashed blue line varies the number of anchors during the inference phase and converges to the solid blue line in the limit of increasing the inference anchors to the full training set. The dashed orange line indicates traditional ANN ensembles where multiple ANNs are trained independently. The dashed green line corresponds to ensembles of independently trained TNNR models; if the ensemble size is 1, this is equivalent to the solid blue line. The dashed red line corresponds to independently trained TNNR models, each having only one inference anchor per prediction.
+ In order to compare traditional ANNs with TNNR, we observe that after training we can map TNNR to an ANN for each anchor, since both ANN regression and TNNR use the same internal architecture. For each fixed $x_j \in X^{\mathrm{train}}$, an ANN $\tilde{f}$ is defined through
+ $\tilde{f}_j(x_i) := F(x_i, x_j) + y_j$ .    (3)
+ The features of $x_j$ modify the weights of $\tilde{f}_j$ while $y_j$ is absorbed by the output neuron inside its bias. At that point, the only difference between each $\tilde{f}_j$ and an equivalent ANN is the procedure with which the weights were optimized.
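+ In code, Eq. (3) amounts to currying the difference model with a fixed anchor; the sketch below, reusing the illustrative F and pairing convention from above, shows how each anchor defines an ordinary single-input regressor. The function names are our own.
+ def make_anchor_regressor(F, x_anchor, y_anchor):
+     # Eq. (3): fixing the anchor turns the two-input difference model F
+     # into an ordinary one-input regressor f_j(x) = F(x, x_j) + y_j.
+     def f_j(x):
+         feats = np.hstack([x, x_anchor, x - x_anchor]).reshape(1, -1)
+         return F.predict(feats)[0] + y_anchor
+     return f_j
+
+ f_0 = make_anchor_regressor(F, X[0], y[0])
+ print(f_0(np.array([0.2, -0.1, 0.3])))  # behaves like a plain single-input model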
+ [Figure 2 plots: test RMSE vs. number of ensemble members (2 to 32) for nine data sets (Bio Conservation, Boston Housing, Concrete Strength, Energy Efficiency, RCL Circuit, Test Function, Wheatstone Bridge, Red Wine Quality, Yacht Hydrodynamics); legend: all anchors inference, TNN inference anchors, ANN ensemble, Ensemble of full anchor TNN, Ensemble of one anchor TNN.]
+ Table 1: Best estimates for test RMSEs obtained by artificial neural network (ANN) regression compared to ensembles of ANN regression. Our confidence in the RMSEs is determined by their standard error. We train on 70% of the available data, 10% validation data, 20% test data.
+ Data set                 | Single ANN    | 32 ANN ensemble | Gain
+ Bio Conservation (BC)    | 0.7874±0.012  | 0.7438±0.0118   | 5.53%
+ Boston Housing (BH)      | 3.5695±0.1051 | 3.3291±0.1137   | 6.73%
+ Concrete Strength (CS)   | 5.3829±0.0964 | 4.9842±0.0938   | 7.41%
+ Energy Efficiency (EE)   | 1.0310±0.0313 | 0.9649±0.0224   | 6.42%
+ RCL Circuit (RCL)        | 0.0193±0.0002 | 0.0157±0.0002   | 18.41%
+ Test Function (TF)       | 0.0076±0.0005 | 0.0071±0.0005   | 6.35%
+ Wheatstone Bridge (WSB)  | 0.0443±0.0016 | 0.0359±0.0014   | 19.1%
+ Red Wine Quality (WN)    | 0.7681±0.024  | 0.6487±0.0068   | 15.54%
+ Yacht Hydrodynamics (YH) | 0.6723±0.0406 | 0.6143±0.0357   | 8.63%
+ Table 2: Best estimates for test RMSEs obtained by Twin Neural Network Regression (TNNR). We
+ measure the improvement between using a single anchor during the inference phase and using all anchors.
+ Further, the latter is compared to an ensemble of full anchor TNNR.
+ Data set  1 Anchor TNNR    All Anchor TNNR  Gain     Ensemble of 32 TNNR  Gain
+ BC        0.9021±0.0131    0.8140±0.0149    9.77%    0.7947±0.0134        2.37%
+ BH        3.4793±0.1262    3.2897±0.1293    5.45%    3.2232±0.1256        2.02%
+ CS        4.9602±0.1073    4.5385±0.1124    8.5%     4.2791±0.0995        5.71%
+ EE        0.7445±0.0224    0.7071±0.0229    5.02%    0.6468±0.0196        8.53%
+ RCL       0.0212±0.0003    0.0155±0.0002    26.6%    0.0130±0.0001        16.23%
+ TF        0.0106±0.0004    0.0053±0.0004    50.24%   0.0028±0.0001        46.41%
+ WSB       0.0309±0.0009    0.0239±0.0009    22.81%   0.0227±0.0008        5.15%
+ WN        0.7654±0.0059    0.6985±0.0064    8.75%    0.6713±0.0055        3.89%
+ YH        0.6344±0.0363    0.5798±0.0362    8.62%    0.5723±0.0336        1.29%
+ This gives us access to a framework to directly compare ensembles of ANNs and
+ the implicit ensembles generated by TNNR using multiple anchors during inference. We
+ examine the results of both models through the orange and blue dashed lines in
+ Fig. 2. While both curves reduce the RMSE as we increase the ensemble size, we come
+ to the sobering conclusion that TNNR ensembles and ANN ensembles are not equivalent:
+ they neither have a uniform slope nor do they converge to similar RMSEs.
+ We have just used a single trained TNNR model for all ensemble members while
+ training each ANN model from scratch. What happens if we retrain TNNR for each
+ single anchor? The results of these experiments are visualized in the red dashed line.
+ Since retraining increases the ensemble diversity, the red line is consistently below the blue
+ line. Further, we can see that the performance of these independently trained TNNRs
+ increases faster with the number of anchors. If the resources are available, one can further
+ create an ensemble of different TNNR models, each having access to all anchors, depicted
+ in the green line. On seven out of nine data sets this clearly yields the best performance.
+ We note that one-anchor TNNR with retraining (red line) converges towards the green line for
+ more than 32 ensemble members. This tells us that the full ensemble diversity through
+ multiple anchors and multiple models can be captured independently.
+ A more quantitative version of the out-performance of TNNR ensembles can be
+ seen in Table 1 and Table 2. The combined anchor+direct ensembling containing multiple
+ TNNs yields a much larger improvement than the ensembling of traditional ANNs.
+ 6. Effective Training Set Size
+ Another improvement over a traditional regression analysis is the increased training set
+ size that comes from preparing training sets by pairing each training data point with
+ every other training data point.
+
+ Figure 3: How many different pairings are used for training affects the performance of TNNR, measured
+ by RMSE. The blue solid line corresponds to training TNNR on all possible training pairs. It is
+ compared to training TNNR on a randomly chosen but fixed data set of pairs while still employing all
+ training data points as inference anchors. The training set multiplier indicates how many pairs are
+ taken into account compared to the original unpaired training set.
+ [Figure 3 panels: RMSE vs training pairs per training data point (1 to 32) for the nine data sets;
+ curves: all anchors training (solid) and a fixed number of anchor pairs per training data point (dashed).]
+ Table 3: Best estimates for test RMSEs obtained by Nearest Neighbor Twin Neural Network Regression
+ (NNTNNR).
+ Data set  TNNR             NN inference     Gain     NN train+inference   Gain
+ BC        0.8234±0.0144    0.8162±0.0155    0.87%    0.8133±0.0152        1.23%
+ BH        3.3104±0.1202    3.2898±0.1202    0.62%    3.3563±0.1161        -1.39%
+ CS        4.4731±0.1242    4.4458±0.1091    0.61%    4.4290±0.1174        0.99%
+ EE        0.7156±0.0204    0.7056±0.0193    1.4%     0.6825±0.0216        4.63%
+ RCL       0.0158±0.0002    0.0151±0.0003    3.98%    0.0140±0.0002        11.33%
+ TF        0.0050±0.0001    0.0028±0.0002    43.65%   0.0021±0.0003        57.26%
+ WSB       0.0233±0.0006    0.0224±0.0009    4.06%    0.0236±0.0009        -1.01%
+ WN        0.6998±0.006     0.6951±0.0062    0.68%    0.6944±0.006         0.77%
+ YH        0.5977±0.0344    0.5184±0.0331    13.26%   0.5009±0.0333        16.19%
+ This transforms a training set of size n into a pairwise
+ training set of size n². In this section, we measure whether the increase in the number of
+ pairings leads to an increase in accuracy.
+ To address this question we look at several curves in Fig. 3, which
+ compare the effect of increasing the effective pairwise training data set on the accuracy.
+ For this purpose, we define the training set multiplier as the number of pairs that are
+ created per data point from the original training set to produce the paired training
+ set. A training set multiplier of one means that each training data point is paired with
+ only one other randomly chosen (without replacement) but fixed data point (on average
+ this means each training data point is used twice). Increasing the training set multiplier
+ to the size of the training set converges to the standard formulation of twinned regression
+ methods. We can see that in all data sets except two, increasing the training set multiplier
+ increases the performance of TNNR. More precisely, a training set multiplier of ≈ 8−16
+ already seems to be enough to reach the accuracy of standard TNNR. It is important to
+ note that on two data sets, namely Bio Conservation (BC) and Red Wine Quality (WN),
+ increasing the training set multiplier reduces the performance. This coincides with other
+ algorithms beating TNNR (Fig. 2, Fig. 5) and is a sign that TNNR might not be suitable
+ for such regression tasks. A data scientist using TNNR can therefore perform a training
+ set multiplier check: if accuracy decreases while the multiplier increases, TNNR can be
+ rejected as the optimal regression algorithm. A minimal sketch of the paired-set
+ construction is given below.
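+ The following sketch shows one way to build such a fixed paired training set;
+ the helper name and the use of numpy are our own choices:
+
+     import numpy as np
+
+     def make_paired_training_set(X, y, multiplier, seed=0):
+         # pair each point with `multiplier` randomly chosen (without
+         # replacement) but fixed partners; targets are label differences
+         rng = np.random.default_rng(seed)
+         n = len(X)
+         pairs, targets = [], []
+         for i in range(n):
+             partners = rng.choice(np.delete(np.arange(n), i),
+                                   size=multiplier, replace=False)
+             for j in partners:
+                 pairs.append(np.concatenate([X[i], X[j]]))
+                 targets.append(y[i] - y[j])
+         return np.asarray(pairs), np.asarray(targets)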
+ 7. Nearest Neighbor TNNR
+ In this manuscript we propose a new regression algorithm based on a combination
+ of k-nearest neighbor regression and TNNR, which could of course be implemented
+ for various baseline twinned regression algorithms. In standard twinned regression
+ methods the model learns to predict differences between the targets of two arbitrary
+ data points. This model is then employed to create an ensemble prediction via averaging
+ the approximations of the differences between the target value of a new data point and
+ all anchor data points, see (2). However, not all of these anchor data points might be of
+ equal importance for the prediction. That is why in this section we restrict the anchor
+ points to the nearest neighbors. For this purpose, we define the notation NN(i,m) as
+ the set of m nearest neighbors of a data point xi ∈ X within the training set Xtrain,
+ and reformulate the prediction:
+ y_i^pred = (1/m) Σ_{j∈NN(i,m)} (F(xi, xj^train) + yj^train) (4)
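+ A minimal sketch of the nearest-neighbor inference rule (4), assuming a trained
+ twin network F and using scikit-learn's NearestNeighbors for the neighbor search:
+
+     import numpy as np
+     from sklearn.neighbors import NearestNeighbors
+
+     def nntnnr_predict(F, X_train, y_train, X_new, m):
+         # average the difference predictions over the m nearest anchors only
+         nn = NearestNeighbors(n_neighbors=m).fit(X_train)
+         _, idx = nn.kneighbors(X_new)  # idx[i] holds NN(i, m)
+         return np.array([np.mean([F(x, X_train[j]) + y_train[j]
+                                   for j in anchors])
+                          for x, anchors in zip(X_new, idx)])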
+ While we have defined the prediction using nearest neighbors during the inference
+ phase, it is an open question whether it is better to train the model to predict
+ differences between target values of generic data points or between neighboring data
+ points, corresponding to the same number of nearest neighbors as in the inference phase.
+ The different training versions are compared in Fig. 4, where the baseline is set by
+ standard TNNR.
+
+ Figure 4: Effect of nearest neighbor pairing on TNNR, measured in terms of RMSE vs the number
+ of nearest neighbors used, if applicable. The solid blue line marks the performance of the original
+ TNNR. The dashed blue line displays the results of TNNR trained on all possible pairs while performing
+ inference only using nearest neighbor anchors. The dashed orange line is produced by restricting pairs
+ to nearest neighbors for training and inference.
+ [Figure 4 panels: RMSE vs number of nearest neighbors (1 to 64) for the nine data sets;
+ curves: all anchor training (solid), all training pairs (dashed), nearest neighbor training pairs (dashed).]
+ We emphasize that both versions of training obey the same principle for selecting
+ nearest neighbors during inference. When only using the very nearest neighbor as an
+ anchor for inference, we can see that for 7 out of 9 data sets both training versions
+ underperform traditional TNNR, while training on all possible pairs performs better than
+ just training on neighboring training data points. This picture changes as we increase
+ the number of nearest neighbors. On all data sets both versions of nearest neighbor
+ TNNR converge to standard TNNR in the limit of increasing the number of neighbors to
+ the training set size. In 7 out of 9 data sets there is a sweet spot, at around 16 to 64
+ neighbors, where nearest neighbor TNNR with nearest neighbor training outperforms
+ standard TNNR; in 3 of those data sets nearest neighbor training outperforms by a very
+ large margin, culminating in a reduction of the RMSE by ≈ 60% on the TF data set, see
+ Table 3. We note that this is the data set with zero noise.
+
+ Figure 5: Comparison of k-NN regression and TNNR with different numbers of nearest neighbor
+ training pairs, measured by RMSE vs the number of neighbors for the dashed lines. The solid blue line
+ corresponds to normal TNNR with access to all possible pairs during training and inference. The blue
+ dashed line restricts the pairs to be nearest neighbors during inference. The orange dashed line restricts
+ the pairs to be nearest neighbors during training and inference. The green dashed line describes k-NN.
+ [Figure 5 panels: RMSE vs number of nearest neighbors (1 to 64) for the nine data sets;
+ curves: all anchor training, all training pairs, nearest neighbor training pairs, k-NN regression.]
+ 8. TNNR vs k-NN
+ A natural question is how nearest-neighbor TNNR (NNTNNR) relates to k-NN regression.
+ Nearest neighbor TNNR can be reduced to k-NN regression by setting F(xi,xj) ≡ 0, in
+ which case
+ y_i^pred = (1/m) Σ_{j∈NN(i,m)} (F(xi, xj^train) + yj^train) = (1/m) Σ_{j∈NN(i,m)} yj^train. (5)
+ If F(xi,xj) were just a minor contribution on top of k-NN regression, we would see
+ a qualitatively similar performance of NNTNNR. In order to test this statement, we
+ visualize the behavior of k-NN and NNTNNR in Fig. 5. In this figure we can clearly see
+ that NNTNNR beats k-NN regression by an enormous margin on 7 out of 9 data sets.
+ However, there are two data sets, namely BC and WN, where k-NN is the winner. Again,
+ we note that these are exactly the data sets where the expected TNNR mechanism fails
+ (Fig. 3), which coincides with ANN ensembles outperforming TNNR (Fig. 2). Further,
+ the optimal number of TNNR anchors is much larger than the optimal number of
+ neighbors in k-NN.
+ 9. Reformulation Benefits
+ After having discussed the impacts of ensembling and the increased effective training
+ set size, we now finally have the tools to partially answer the question of whether the
+ reformulation to the dual problem itself contributes to the increased accuracy of twinned
+ regression methods. We have related TNNR to normal ANNs in (3) and connected
+ NNTNNR to k-NN regression in (5). If TNNR were a glorified form of ANN or k-NN
+ regression, the performance of TNNR could be related qualitatively to neural networks
+ or k-NN. However, as we can see by comparing with ANNs in Fig. 2 or k-NN in Fig. 5, it
+ is clear that TNNR has a distinct performance profile: it beats ANNs and k-NN on
+ the same 7 of 9 data sets and underperforms both on the remaining two data sets. As
+ we know from testing the impact of the increased training set size (Fig. 3), these are the
+ data sets where increasing the number of pairings has an adverse effect on accuracy,
+ which signals that the TNNR mechanism fails while ANN and k-NN continue to perform
+ normally. All these facts support the conclusion that the reformulation to a dual problem
+ itself tends to have a positive effect on accuracy on most data sets.
+ 10. Miniaturizing accurate networks
+ Often it is required to store fully trained neural networks on hardware that has strict
+ memory limitations. This might be on chips that allow for autotuning of quantum
+ dots [9] or in self-driving vehicles [10]. In these cases it is required to consider the
+ trade-off between accuracy and memory requirements. Ensembles of neural networks
+ tend to be more accurate than single neural networks; however, storing them requires
+ linearly more memory capacity per ensemble member. TNNR provides an elegant
+ solution to this problem, because in order to store an ensemble of predictions it only
+ requires the storage of a single set of trained weights and biases together with one
+ anchor data point per ensemble member. Our neural network architectures are chosen
+ such that they produce accurate results on all nine considered data sets. Thus, we
+ have chosen an architecture with two hidden layers, each with 128 neurons. In Fig. 6
+ we visualize the number of parameters that are required to be stored in the case of
+ traditional ANN and TNNR ensembles for different feature sizes of the input data. It can
+ be seen that in all cases TNNR parameters plus anchors need far less storage capacity
+ to achieve a similar ensemble size as ANNs. Of course, in practice the optimal neural
+ network architecture and its number of parameters varies between problems, meaning
+ our quantitative analysis of memory requirements might not generalize to other problems.
+ However, the qualitative trend remains the same as long as the feature size is smaller
+ than the number of parameters of the model; a back-of-the-envelope count is sketched below.
+ Figure 6: Memory requirements for storing real ensembles in terms of the number of parameters
+ stored vs the ensemble size. The architecture used corresponds to the architecture that was used
+ to produce the results in this manuscript. Different architectures cause quantitatively different
+ plots, but qualitatively they behave similarly. The solid lines indicate the memory requirements for
+ storing an ensemble of independently trained ANN models for different numbers of features f
+ describing each data instance. The dashed lines correspond to ensembles generated by storing one
+ TNNR model and a representative number of anchors which can be combined to produce ensembles.
+ [Figure 6: number of parameters (log scale) vs ensemble size (0 to 30) for ANN ensembles and
+ TNN anchors at f = 4, 13, 100.]
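+ The following back-of-the-envelope count, a sketch assuming the two-hidden-layer,
+ 128-neuron architecture and a twin network that reads a concatenated pair of
+ feature vectors, illustrates the trend in Fig. 6:
+
+     def ann_ensemble_params(f, n_members, width=128):
+         # dense net f -> 128 -> 128 -> 1, weights plus biases, per member
+         per_model = (f * width + width) + (width * width + width) + (width + 1)
+         return n_members * per_model
+
+     def tnnr_ensemble_params(f, n_members, width=128):
+         # one twin network on the pair (2f inputs) plus one stored anchor
+         # (f features and 1 label) per ensemble member
+         twin = (2 * f * width + width) + (width * width + width) + (width + 1)
+         return twin + n_members * (f + 1)
+
+     print(ann_ensemble_params(13, 32))   # ~5.9e5 parameters
+     print(tnnr_ensemble_params(13, 32))  # ~2.1e4 parameters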
+ 11. Making other models semi-supervised
+ In this section we explore a simple framework to train any twinned regression method in
+ a semi-supervised manner. The idea is based on the semi-supervised regression method
+ devised in [2] for TNNR. As we can see in Fig. 1, the dual formulation requires machine
+ learning models to predict differences F(xi,xj) = yi − yj between target values instead of
+ the targets f(x) = y themselves. One advantage of this formulation is that a correct
+ solution would satisfy the loop consistency condition F(x1,x2) + F(x2,x3) + F(x3,x1) = 0.
+ Figure 7: Transductive semi-supervised learning with random forests. Random forests have been
+ supplied with loops containing unlabelled data points. The magnitude of the influence the loops have
+ on the training process is measured by Λ. Tuning Λ ≈ 1 reduces the RMSE on almost all data sets
+ compared to purely supervised learning (solid blue line).
+ [Figure 7 panels: RMSE vs loop consistency weight Λ (0.001 to 10) for the nine data sets.]
+ Table 4: Best estimates for test RMSEs obtained by semi-supervised random forests.
+ Data set                   Supervised RF    Semi-Supervised RF   Improvement
+ Bio Conservation           0.7367±0.0081    0.7357±0.0078        0.14%
+ Boston Housing             3.8408±0.1322    3.8281±0.1356        0.33%
+ Concrete Strength          5.8813±0.2242    5.8440±0.2276        0.63%
+ Energy Efficiency          1.9112±0.0417    1.8919±0.0411        1.01%
+ RCL Circuit                0.2934±0.0068    0.2932±0.0068        0.05%
+ Test Function              0.0857±0.0022    0.0855±0.0021        0.19%
+ Wheatstone Bridge          0.0958±0.0025    0.0958±0.0022        0.02%
+ Red Wine Quality           0.6066±0.0083    0.6045±0.0084        0.35%
+ Yacht Hydrodynamics        1.1138±0.0383    1.1091±0.0384        0.42%
+ Hence, we propose the following algorithm, which is applicable to all twinned regression
+ methods: At first we train the regression model on the labelled training data. This model
+ is then used to predict the differences between targets along loops randomly sampled
+ from an unlabelled data set. For each loop, an adjustment a defined by
+ a = F(x1,x2) + F(x2,x3) + F(x3,x1) (6)
+ is then used to propose a label yij = F(xi,xj) − Λ · a for each combination of xi, xj within
+ the loop. Here, Λ is the loop weight hyper-parameter. The unlabelled data set together
+ with the proposed labels is then added to the labelled training set, on which the model
+ is retrained. The algorithm is further depicted in Fig. 1 and listed as Algorithm 1 in
+ Appendix D; a minimal code sketch of the loop update follows.
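+ A minimal sketch of one loop update, assuming a trained difference model M
+ (the helper name is hypothetical):
+
+     def propose_loop_labels(M, x_i, x_j, x_k, lam):
+         # M(x, x') approximates y - y'; lam is the loop weight Λ
+         d_ij, d_jk, d_ki = M(x_i, x_j), M(x_j, x_k), M(x_k, x_i)
+         a = d_ij + d_jk + d_ki  # loop-consistency violation, eq. (6)
+         # pseudo-labels shifted toward a consistent loop
+         return d_ij - lam * a, d_jk - lam * a, d_ki - lam * a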
+ We apply this idea to the pairwise/twinned random forest regression proposed in [3],
+ which was originally aimed at solving regression problems on small data sets in chemistry.
+ Since random forests do not scale as well to large data sets, we restrict our data sets to
+ 100 training and 100 test data points. The details of the training process are outlined in
+ Appendix D.
+ Before applying the semi-supervised learning strategy, we convince ourselves that
+ twinned random forest regression is suitable for the test bed consisting of the nine data
+ sets (Appendix A) used in this paper. The corresponding results can be seen in Table D1.
+ Twinned random forests perform equally well, or slightly worse, compared to traditional
+ random forests on three data sets (BC, EE, WN). They moderately outperform on four
+ data sets (BH, CS, RCL, YH) and massively outperform, cutting the RMSE by more than
+ 35%, on two data sets (TF, WSB).
+ After having convinced ourselves of the superior performance of twinned random
+ forests, we apply our semi-supervised learning framework in a transductive manner.
+ Transductive means that the test data is used as unlabelled training data. This is in
+ contrast to inductive semi-supervised learning, where the unlabelled training data would
+ be kept separate from the final test data. The final results are depicted in Fig. 7 for
+ various choices of the loop weight Λ. We can clearly see that the optimal choice of
+ Λ ≈ 1 leads to a reduction of the RMSE on six out of nine data sets. However, the relative
+ improvement from semi-supervised learning is very small, as shown in Table 4, and most of
+ the time less than 1%. If we compare these results with other semi-supervised regression
+ algorithms on the same data sets from [2], one can observe that this improvement is
+ significantly less than semi-supervised TNNR and slightly less than co-training with
+ neural networks.
+ 12. Negative Results
+ Let us briefly discuss in this section different ideas that we tried during our experiments
+ but that did not lead to a consistent improvement of twinned regression methods.
+ While exploring the ideal weighting of anchors during the inference phase of twinned
+ regression methods, a straightforward idea was to try incorporating the intrinsic
+ uncertainty metrics [1]. These include the ensemble standard deviation and the violation
+ of loop consistency. Anchors with a lower uncertainty metric should be weighted higher
+ than anchors with high uncertainty metrics. While we observed some benefit, we could
+ not consistently show that this process improved the accuracy in a statistically significant
+ manner. We believe that other uncertainty metrics that are unrelated to the intrinsic
+ consistency metrics might be better suited, as for example in Gaussian processes.
+ Our initial plan when devising a strategy to adapt the semi-supervised learning
+ framework from [2] to other algorithms was based on an iterative algorithm. After
+ training the underlying twinned regression algorithm, the model would predict labels
+ on unknown data points. Randomly sampling loops containing these data points would
+ allow us to check for loop consistency. The unknown data points would then be added
+ to the training data set with a label that corresponds to the original prediction, slightly
+ modified in the direction which satisfies the loop condition. The idea was to iteratively
+ refine the labels by repeating this process. However, it turned out that many times this
+ process would either not converge for Λ > 1/3, or eventually converge to sub-optimal
+ solutions, worse than the initial supervised version.
+ Combining k-NN regression with TNNR was also aimed at reducing the
+ computational time. As explored in [1], the training time of twinned regression methods
+ scales poorly towards larger data sets, mostly caused by the increase in the effective
+ data set size through the pairing of data points. While for many baseline algorithms
+ there is a clear relationship between data set size and training time, for neural networks
+ this relationship is less clear. As neural network training time scales very favorably with
+ training set size, we focused on TNNR to test the training time improvement from only
+ using nearest neighbor pairing during the training phase. In Fig. 8 we can see that there
+ is a tendency for a reduced computational cost on most data sets. However, the training
+ time improvement is too minor and inconsistent to use it as a sole justification to use
+ NNTNNR over traditional TNNR.
+ Figure 8: Training and inference time comparison in seconds between different versions of TNNR and
+ ANN regression. The solid orange line indicates the time for training and inference in the case of ANNs.
+ The solid blue line is the same for original TNNR. The blue dotted line indicates restricting the possible
+ training/inference pairs to nearest neighbors; the x-axis corresponds to the number of neighbors.
+ [Figure 8 panels: training plus inference time in seconds vs number of nearest neighbors (1 to 64)
+ for the nine data sets; curves: ANN, TNN, nearest neighbor training data.]
+ 13. Conclusion
+ Twinned regression is a simple and versatile framework to improve performance through
+ intrinsic ensembling and semi-supervised learning on small to medium-sized data sets.
+ In this article, we have answered several questions about the nature of the twinned
+ regression framework. Further, we devised several modifications that improve the
+ already state-of-the-art performance of twinned regression methods.
+ We compared the ensemble behaviour of traditional ANNs and TNNR. For this
+ purpose we mapped single anchor TNNR to an equivalent ANN model during the inference
+ phase. By visualizing the results of these examinations in Fig. 2 we can see that the
+ performance of an ensemble of single anchor TNNs converges towards an ensemble of full
+ anchor TNNs at around 32 ensemble members; increasing the number of anchors would
+ not yield any additional gain. This suggests that the intrinsic ensemble diversity of
+ TNNR is a subset of the diversity that can be achieved by retraining the network for
+ each anchor. The combined anchor+direct ensembling containing multiple TNNs causes a
+ much larger improvement than the ensembling of traditional ANNs, as can be seen in
+ Table 1 and Table 2. This explains one element of the outperformance of TNNR over
+ ANN regression.
+ Further, we examined what effect the increased training set size through pairing
+ data points has on the TNN accuracy (Fig. 3). Generally, more pairings per training data
+ point reduced the RMSE. However, to our surprise not all data sets benefited from this
+ pairing: on two data sets (BC, WN) TNNR had the lowest RMSE if only one pairing
+ per training data point was allowed. By comparing this malfunction to the results in Fig. 2
+ and Fig. 5, we can see that it occurs in exactly the data sets where k-NN regression
+ and ANN ensembles outperform TNNR, signaling a breakdown of the performance-increasing
+ factors of TNNR. Judging by the properties of the data sets, it seems that
+ twinned regression methods perform best on continuous data sets where the label can be
+ approximated through a deterministic function.
+ We also pointed out another advantage of TNNR in the case where it is impossible to
+ store a large number of parameters, but one wants to retain the advantages of ensembles.
+ TNNR provides the possibility to generate an ensemble of predictions just by storing
+ one model and some anchors, which is usually significantly smaller than storing multiple
+ ANN models, see Fig. 6.
+ While exploring the ideal weighting of anchors during the inference phase of TNNR,
+ we found that nearest-neighbor predictions tend to yield the most accurate results.
+ This led us to develop nearest-neighbor TNNR (NNTNNR), which is a combination
+ of the k-nearest neighbor algorithm and TNNR. There are two versions of NNTNNR,
+ one which respects nearest neighbors during both training and inference, and another
+ version that only restricts to nearest neighbors during the inference phase. Restricting
+ to nearest-neighbor training tends to yield slightly better results (Fig. 4). Both versions
+ outperform standard TNNR, especially on low-noise data sets, see Table 3. It is important
+ to note that NNTNNR is not just a minor improvement to k-NN regression, since it has
+ a very different performance profile when it comes to varying the number of anchors, or
+ nearest neighbors, respectively, as can be seen in Fig. 5.
+ We devised a semi-supervised regression framework based on enforcing loop
+ consistency that can be applied to any twinned regression algorithm; we tested it for
+ random forests. This method yielded a clearly visible improvement over the supervised
+ counterpart, as can be seen in Fig. 7. However, the magnitude of the reduction of the
+ RMSE is relatively small and almost always less than 1%, see Table 4. Comparing these
+ results with other semi-supervised regression algorithms on the same data sets from [2],
+ we can see that this improvement is significantly less than semi-supervised TNNR and
+ slightly less than co-training with neural networks.
+ The code supporting this publication is available at [18].
+ 14. Acknowledgements
+ We thank Zurab Jashi for his help with the random forest code. This work was
+ supported by Mitacs and Homes Plus Magazine Inc. through the Mitacs Accelerate
+ program. We also acknowledge Compute Canada for computational resources. We thank
+ the National Research Council of Canada for their partnership with Perimeter on the
+ PIQuIL. Research at Perimeter Institute is supported in part by the Government of
+ Canada through the Department of Innovation, Science and Economic Development
+ Canada and by the Province of Ontario through the Ministry of Economic Development,
+ Job Creation and Trade.
+ Appendix A. Data sets
+ Table A1: Data sets
+ Name                        Key   Size   Features   Type
+ Bio Concentration           BC    779    14         Discrete, Continuous
+ Boston Housing              BH    506    13         Discrete, Continuous
+ Concrete Strength           CS    1030   8          Continuous
+ Energy Efficiency           EE    768    8          Discrete, Continuous
+ RCL Circuit Current         RCL   4000   6          Continuous
+ Test Function               TF    1000   2          Continuous
+ Red Wine Quality            WN    1599   11         Discrete, Continuous
+ Wheatstone Bridge Voltage   WSB   200    4          Continuous
+ Yacht Hydrodynamics         YH    308    6          Discrete
+ The test function (TF) data set is created from the equation
+ F(x1,x2) = x1³ + x1² − x1 − 1 + x1·x2 + sin(x2) (A.1)
+ and zero noise.
+ The output in the RCL circuit current data set (RCL) is the current through an
+ RCL circuit, modeled by the equation
+ I0 = V0 cos(ωt) / √(R² + (ωL − 1/(ωC))²) (A.2)
+ with added Gaussian noise of mean 0 and standard deviation 0.1.
+ The output of the Wheatstone Bridge voltage (WSB) data set is the measured voltage
+ given by the equation
+ V = U (R2/(R1 + R2) − R3/(R2 + R3)) (A.3)
+ with added Gaussian noise of mean 0 and standard deviation 0.1.
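+ A sketch of how the synthetic data sets can be generated; the functional forms
+ and noise levels follow (A.1) and (A.2), while the input sampling ranges are
+ assumptions:
+
+     import numpy as np
+
+     rng = np.random.default_rng(0)
+
+     def test_function_data(n=1000):
+         # TF data set, eq. (A.1), zero noise
+         x1, x2 = rng.uniform(-2, 2, n), rng.uniform(-2, 2, n)
+         y = x1**3 + x1**2 - x1 - 1 + x1 * x2 + np.sin(x2)
+         return np.stack([x1, x2], axis=1), y
+
+     def rcl_current_data(n=4000):
+         # RCL data set, eq. (A.2), Gaussian noise with std 0.1
+         V0, w, t, R, L, C = rng.uniform(0.5, 2.0, (6, n))
+         I0 = V0 * np.cos(w * t) / np.sqrt(R**2 + (w * L - 1 / (w * C))**2)
+         return np.stack([V0, w, t, R, L, C], axis=1), I0 + rng.normal(0, 0.1, n)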
+ Appendix B. Bias-Variance Tradeoff and Ensembles
+ In a regression problem, one is tasked with finding the true labels of yet unlabelled
+ data points through the estimation of a function f(x) = y. Given a finite training data
+ set D, we denote this approximation ˆf(x;D). The expected mean squared error can
+ be decomposed into three sources of error: the bias error BiasD[ˆf(x;D)], the variance
+ error VarD[ˆf(x;D)] and the intrinsic error of the data set σ:
+ MSE = Ex {BiasD[ˆf(x;D)]² + VarD[ˆf(x;D)]} + σ². (B.1)
+ If we replace the estimator by an ensemble of two functions ˆf(x;D) = 1/2 ˆfA(x;D) +
+ 1/2 ˆfB(x;D), each exhibiting the same bias and variance as the original estimator, then
+ we can decompose the MSE as
+ MSE = Ex {BiasD[1/2 ˆfA(x;D) + 1/2 ˆfB(x;D)]² + VarD[1/2 ˆfA(x;D) + 1/2 ˆfB(x;D)]} + σ² (B.2)
+     = Ex {BiasD[ˆf(x;D)]² + VarD[1/2 ˆfA(x;D)] + VarD[1/2 ˆfB(x;D)] (B.3)
+       + 2 CovD[1/2 ˆfA(x;D), 1/2 ˆfB(x;D)]} + σ² (B.4)
+     = Ex {BiasD[ˆf(x;D)]² + 1/2 VarD[ˆfA(x;D)] + 1/2 CovD[ˆfA(x;D), ˆfB(x;D)]} + σ². (B.5)
+ The more uncorrelated ˆfA(x;D) and ˆfB(x;D) are, the smaller the covariance contribution
+ becomes relative to the variance. Thus an ensemble consisting of weakly correlated ensemble
+ members reduces the MSE by circumventing the bias-variance tradeoff. By induction this
+ argument extends to larger ensemble sizes.
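+ The following simulation, with two estimators whose errors have unit variance
+ and an arbitrarily chosen correlation ρ = 0.3, reproduces the 1/2 Var + 1/2 Cov
+ formula of (B.5):
+
+     import numpy as np
+
+     rng = np.random.default_rng(0)
+     rho = 0.3
+     cov = np.array([[1.0, rho], [rho, 1.0]])
+     errs = rng.multivariate_normal([0.0, 0.0], cov, size=100_000)
+
+     single_mse = np.mean(errs[:, 0] ** 2)           # ≈ Var = 1
+     ensemble_mse = np.mean(errs.mean(axis=1) ** 2)  # ≈ 1/2 + 1/2·ρ = 0.65
+     print(single_mse, ensemble_mse)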
+ Appendix C. Neural Network Architectures
+ Both our traditional neural network regression and twin neural network regression
+ methods are built using the same architecture, implemented with the tensorflow library [19].
+ They consist of two hidden layers with 128 neurons each and ReLU activation functions.
+ The final layer contains a single neuron without an activation function. We train
+ our neural networks using the Adadelta optimizer, and use learning rate and early stopping
+ callbacks that reduce the learning rate by 50% or stop training if the loss stops decreasing.
+ For this reason it is enough to set the number of epochs large enough that early
+ stopping is always triggered; in our case this is 2000 epochs for ANNs and 10000 for TNNR.
+ The batch size is 16 in both cases. A sketch of this setup is given below.
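+ A sketch of this setup in Keras; the callback patience values and the mean
+ squared error loss are assumptions:
+
+     import tensorflow as tf
+
+     def build_regressor(n_features):
+         model = tf.keras.Sequential([
+             tf.keras.layers.Input(shape=(n_features,)),
+             tf.keras.layers.Dense(128, activation="relu"),
+             tf.keras.layers.Dense(128, activation="relu"),
+             tf.keras.layers.Dense(1),  # single linear output neuron
+         ])
+         model.compile(optimizer=tf.keras.optimizers.Adadelta(), loss="mse")
+         return model
+
+     callbacks = [
+         tf.keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=10),
+         tf.keras.callbacks.EarlyStopping(patience=30, restore_best_weights=True),
+     ]
+     # model.fit(X_train, y_train, epochs=2000, batch_size=16,
+     #           validation_data=(X_val, y_val), callbacks=callbacks)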
+ Appendix D. Random Forests
+ The random forests in this article use the scikit-learn library [20]. They are trained
+ on a subset of all data sets: from each data set, we randomly sample 100 training
+ data points and 100 test data points. We use five-fold cross-validation to optimize
+ the following hyper-parameters of our random forests: 'max depth' ∈ [4, 8, 16, 32, 64],
+ 'max features' ∈ [0.33, 0.667, 1.0], 'min samples leaf' ∈ [1, 2, 5], 'min samples split'
+ ∈ [2, 4, 8], 'n estimators' ∈ [100, 300, 600]. Both the traditional and the twinned random
+ forests choose their optimal hyper-parameters from the same pool. It is important to
+ note that for semi-supervised learning the hyper-parameters are only optimized during
+ the initial supervised learning step; the optimal parameters are then carried forward to
+ be used during semi-supervised learning. A sketch of this search is given after Table D1.
+ Table D1: Best estimates for test RMSEs obtained by Random Forest compared to Twinned Random
+ Forests.
+ Data set                   Random Forest    Twinned Random Forest   Improvement
+ Bio Conservation           0.7407±0.0087    0.7427±0.0081           -0.27%
+ Boston Housing             4.0019±0.1394    3.8301±0.1322           4.29%
+ Concrete Strength          6.3763±0.2136    5.8519±0.2242           8.22%
+ Energy Efficiency          1.8773±0.0422    1.8906±0.0417           -0.71%
+ RCL Circuit                0.3168±0.0071    0.2958±0.0068           6.63%
+ Test Function              0.1402±0.0044    0.0874±0.0023           37.66%
+ Wheatstone Bridge          0.1461±0.0039    0.0942±0.0025           35.52%
+ Red Wine Quality           0.5989±0.0085    0.6068±0.0083           -1.32%
+ Yacht Hydrodynamics        1.1917±0.029     1.1117±0.0383           6.71%
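+ A sketch of the described hyper-parameter search with scikit-learn; the scoring
+ choice and random seed are assumptions:
+
+     from sklearn.ensemble import RandomForestRegressor
+     from sklearn.model_selection import GridSearchCV
+
+     param_grid = {
+         "max_depth": [4, 8, 16, 32, 64],
+         "max_features": [0.33, 0.667, 1.0],
+         "min_samples_leaf": [1, 2, 5],
+         "min_samples_split": [2, 4, 8],
+         "n_estimators": [100, 300, 600],
+     }
+     search = GridSearchCV(RandomForestRegressor(random_state=0), param_grid,
+                           cv=5, scoring="neg_root_mean_squared_error")
+     # search.fit(X_train, y_train)  # twinned variant is fit on pairs instead
+     # best_rf = search.best_estimator_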
+ Algorithm 1: Semi-Supervised Learning through Loop Consistency
+ Data: Labelled data set DL = (Xtrain,L, Ytrain,L); unlabelled data set DU = (Xtrain,U)
+ Input: Loop weight Λ; loop number nl = length(DL)/3
+ 1  create ˜DL = [((xi, xj, xi − xj), yij = yi − yj) for xi ∈ Xtrain,L for xj ∈ Xtrain,L]
+ 2  initialize machine learning model M
+ 3  train M on ˜DL
+ 4  sample nl loops L = [(xi, xj, xk) where xi ∈ Xtrain,L and xj, xk ∈ Xtrain,U]
+ 5  for (xi, xj, xk) ∈ L do
+ 6      predict M(xi,xj), M(xj,xk), M(xk,xi)
+ 7      a = M(xi,xj) + M(xj,xk) + M(xk,xi)
+ 8      (yij, yjk, yki) = (M(xi,xj) − Λa, M(xj,xk) − Λa, M(xk,xi) − Λa)
+ 9      add ((xi,xj,xi−xj), yij), ((xj,xk,xj−xk), yjk), ((xk,xi,xk−xi), yki) to ˜DL
+ 10 train M on ˜DL
+ Output: Trained model M
+ [1] Wetzel S J, Ryczko K, Melko R G and Tamblyn I 2022 Applied AI Letters e78
+ [2] Wetzel S J, Melko R G and Tamblyn I 2022 Machine Learning: Science and Technology 3 045007
+ [3] Tynes M, Gao W, Burrill D J, Batista E R, Perez D, Yang P and Lubbers N 2021 Journal of Chemical Information and Modeling 61 3846–3857
+ [4] Baldominos A, Blanco I, Moreno A J, Iturrarte R, Bernárdez Ó and Afonso C 2018 Applied Sciences 8 2321
+ [5] Rafiei M H and Adeli H 2016 Journal of Construction Engineering and Management 142 04015066
+ [6] Yu Y, Lu J, Shen D and Chen B 2021 Neural Computing and Applications 33 3925–3937
+ [7] Ryczko K, Wetzel S J, Melko R G and Tamblyn I 2022 Journal of Chemical Theory and Computation 18 1122–1128
+ [8] Avula N V, Veesam S K, Behera S and Balasubramanian S 2022 Machine Learning: Science and Technology
+ [9] Czischek S, Yon V, Genest M A, Roux M A, Rochette S, Lemyre J C, Moras M, Pioro-Ladrière M, Drouin D, Beilliard Y et al. 2021 Machine Learning: Science and Technology 3 015001
+ [10] Lechner M, Hasani R M and Grosu R 2018 arXiv preprint arXiv:1803.08554
+ [11] Bromley J, Guyon I, LeCun Y, Säckinger E and Shah R 1993 Advances in Neural Information Processing Systems 6 737–744
+ [12] Baldi P and Chauvin Y 1993 Neural Computation 5 402–418
+ [13] Srivastava N, Hinton G, Krizhevsky A, Sutskever I and Salakhutdinov R 2014 The Journal of Machine Learning Research 15 1929–1958
+ [14] Wan L, Zeiler M, Zhang S, Le Cun Y and Fergus R 2013 Regularization of neural networks using dropconnect International Conference on Machine Learning (PMLR) pp 1058–1066
+ [15] Wu J 2009 A novel artificial neural network ensemble model based on k-nearest neighbor nonparametric estimation of regression function and its application for rainfall forecasting 2009 International Joint Conference on Computational Sciences and Optimization vol 2 (IEEE) pp 44–48
+ [16] Bensaci R, Khaldi B, Aiadi O and Benchabana A 2021 Applied Sciences 11 10176
+ [17] Liu Z, Guo J, Cao J, Wei Y and Huang W 2018 Promet-Traffic&Transportation 30 445–456
+ [18] Wetzel S 2023 Public github repository URL https://github.com/sjwetzel/PublicGetMostOutOfTNNR
+ [19] Abadi M, Agarwal A, Barham P, Brevdo E, Chen Z, Citro C, Corrado G S, Davis A, Dean J, Devin M, Ghemawat S, Goodfellow I, Harp A, Irving G, Isard M, Jia Y, Jozefowicz R, Kaiser L, Kudlur M, Levenberg J, Mané D, Monga R, Moore S, Murray D, Olah C, Schuster M, Shlens J, Steiner B, Sutskever I, Talwar K, Tucker P, Vanhoucke V, Vasudevan V, Viégas F, Vinyals O, Warden P, Wattenberg M, Wicke M, Yu Y and Zheng X 2015 TensorFlow: Large-scale machine learning on heterogeneous systems, software available from tensorflow.org URL https://www.tensorflow.org/
+ [20] Pedregosa F, Varoquaux G, Gramfort A, Michel V, Thirion B, Grisel O, Blondel M, Prettenhofer P, Weiss R, Dubourg V, Vanderplas J, Passos A, Cournapeau D, Brucher M, Perrot M and Duchesnay E 2011 Journal of Machine Learning Research 12 2825–2830
+
itAzT4oBgHgl3EQfbPyr/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff