jackkuo committed
Commit f0597d7 · verified · 1 Parent(s): e95c00c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50):
  1. -NE4T4oBgHgl3EQf3w1M/content/tmp_files/2301.05308v1.pdf.txt +1597 -0
  2. -NE4T4oBgHgl3EQf3w1M/content/tmp_files/load_file.txt +0 -0
  3. -dE1T4oBgHgl3EQfUgPr/content/tmp_files/2301.03092v1.pdf.txt +1059 -0
  4. -dE1T4oBgHgl3EQfUgPr/content/tmp_files/load_file.txt +0 -0
  5. .gitattributes +99 -0
  6. 0NFJT4oBgHgl3EQfjiwF/vector_store/index.pkl +3 -0
  7. 0dAyT4oBgHgl3EQfbfdc/content/tmp_files/2301.00263v1.pdf.txt +749 -0
  8. 0dAyT4oBgHgl3EQfbfdc/content/tmp_files/load_file.txt +0 -0
  9. 0tE3T4oBgHgl3EQfnAp3/vector_store/index.pkl +3 -0
  10. 19A0T4oBgHgl3EQfMv_l/vector_store/index.faiss +3 -0
  11. 1dAzT4oBgHgl3EQfRfuL/content/tmp_files/2301.01217v1.pdf.txt +1312 -0
  12. 1dAzT4oBgHgl3EQfRfuL/content/tmp_files/load_file.txt +0 -0
  13. 1tA0T4oBgHgl3EQfMv-f/content/2301.02137v1.pdf +3 -0
  14. 1tA0T4oBgHgl3EQfMv-f/vector_store/index.faiss +3 -0
  15. 1tA0T4oBgHgl3EQfMv-f/vector_store/index.pkl +3 -0
  16. 29E1T4oBgHgl3EQf5wUG/content/tmp_files/2301.03514v1.pdf.txt +1100 -0
  17. 29E1T4oBgHgl3EQf5wUG/content/tmp_files/load_file.txt +0 -0
  18. 2dAyT4oBgHgl3EQfbvf7/vector_store/index.faiss +3 -0
  19. 2dFAT4oBgHgl3EQfDhxB/content/tmp_files/2301.08416v1.pdf.txt +762 -0
  20. 2dFAT4oBgHgl3EQfDhxB/content/tmp_files/load_file.txt +0 -0
  21. 2tAzT4oBgHgl3EQffPw3/content/tmp_files/2301.01448v1.pdf.txt +2181 -0
  22. 2tAzT4oBgHgl3EQffPw3/content/tmp_files/load_file.txt +0 -0
  23. 3NFKT4oBgHgl3EQfQi0f/content/2301.11767v1.pdf +3 -0
  24. 3NFKT4oBgHgl3EQfQi0f/vector_store/index.faiss +3 -0
  25. 3NFKT4oBgHgl3EQfQi0f/vector_store/index.pkl +3 -0
  26. 3tFST4oBgHgl3EQfZDim/vector_store/index.faiss +3 -0
  27. 4dFIT4oBgHgl3EQf6yuB/content/tmp_files/2301.11395v1.pdf.txt +578 -0
  28. 4dFIT4oBgHgl3EQf6yuB/content/tmp_files/load_file.txt +0 -0
  29. 59E3T4oBgHgl3EQfpgot/content/2301.04642v1.pdf +3 -0
  30. 59E3T4oBgHgl3EQfpgot/vector_store/index.faiss +3 -0
  31. 5NE1T4oBgHgl3EQfBAIU/vector_store/index.faiss +3 -0
  32. 69E4T4oBgHgl3EQf2A2F/content/2301.05295v1.pdf +3 -0
  33. 69E4T4oBgHgl3EQf2A2F/vector_store/index.faiss +3 -0
  34. 69E4T4oBgHgl3EQf2A2F/vector_store/index.pkl +3 -0
  35. 79E3T4oBgHgl3EQfRwk1/vector_store/index.faiss +3 -0
  36. 79E3T4oBgHgl3EQfRwk1/vector_store/index.pkl +3 -0
  37. 7NE4T4oBgHgl3EQfCQsk/vector_store/index.pkl +3 -0
  38. 7dE0T4oBgHgl3EQffQBI/content/2301.02401v1.pdf +3 -0
  39. 7dE0T4oBgHgl3EQffQBI/vector_store/index.faiss +3 -0
  40. 7dE0T4oBgHgl3EQffQBI/vector_store/index.pkl +3 -0
  41. 7tAyT4oBgHgl3EQfQvZV/content/2301.00051v1.pdf +3 -0
  42. 7tAyT4oBgHgl3EQfQvZV/vector_store/index.faiss +3 -0
  43. 8dE1T4oBgHgl3EQfngSj/content/2301.03310v1.pdf +3 -0
  44. 8dE1T4oBgHgl3EQfngSj/vector_store/index.pkl +3 -0
  45. 8tAyT4oBgHgl3EQf3PkC/vector_store/index.pkl +3 -0
  46. 9dE4T4oBgHgl3EQfDQsU/content/tmp_files/2301.04867v1.pdf.txt +1524 -0
  47. 9dE4T4oBgHgl3EQfDQsU/content/tmp_files/load_file.txt +0 -0
  48. 9tE4T4oBgHgl3EQf3g1J/vector_store/index.pkl +3 -0
  49. AtE2T4oBgHgl3EQfRQeF/content/tmp_files/2301.03779v1.pdf.txt +557 -0
  50. AtE2T4oBgHgl3EQfRQeF/content/tmp_files/load_file.txt +207 -0
-NE4T4oBgHgl3EQf3w1M/content/tmp_files/2301.05308v1.pdf.txt ADDED
Incremental Dead State Detection in Logarithmic Time

Caleb Stanford (1) and Margus Veanes (2)
(1) University of California, San Diego and University of California, Davis
(2) Microsoft Research, Redmond, [email protected]

arXiv:2301.05308v1 [cs.DS] 12 Jan 2023

Abstract. Identifying live and dead states in an abstract transition system is a recurring problem in formal verification. However, state-of-the-art graph algorithms for maintaining reachability information incrementally (that is, as states are visited and before the entire state space is explored) assume that new edges can be added from any state at any time, whereas in many applications, outgoing edges are added from each state as it is explored. To formalize the latter situation, we propose guided incremental digraphs (GIDs), incremental graphs which support labeling closed states (states which will not receive further outgoing edges). Our main result is that dead state detection in GIDs is solvable in O(log m) time per edge update for m edges, improving upon O(√m) per edge due to Bender, Fineman, Gilbert, and Tarjan (BFGT) for general incremental directed graphs.

We introduce two algorithms for GIDs: one establishing the logarithmic time bound, and a second algorithm to explore a lazy heuristics-based approach. To demonstrate applicability, we show how GIDs can be used to lazily decide regular expression constraints in SMT applications. To enable an apples-to-apples experimental comparison, we implemented both algorithms, two naive baselines, and the state-of-the-art BFGT baseline using a common directed graph interface in Rust. Our evaluation shows 110-530x speedups over BFGT for the largest input graphs over a range of graph classes, random graphs, and graphs arising from regular expression benchmarks.

Keywords: Dead State Detection · Graph Algorithms · Online Algorithms · SMT.
1 Introduction

Classifying states in a transition system as live or dead is a recurring problem in formal verification. For example, given an expression, can it be simplified to the identity? Given an input to a nondeterministic program, can it reach a terminal state, or can it reach an infinitely looping state? Given a state in an automaton, can it reach an accepting state? Domain-specific variations on this problem have led to many general and specialized algorithms used by automated verification tools. State classification is relevant in the context of satisfiability modulo theories (SMT) [17,41,3,55,15,36], where theory-specific partial decision procedures often work by exploring the state space to find a reachable path that corresponds to a satisfying string or, more generally, a sequence of constructors. In all of these cases, the core problem is live and dead state detection in a directed graph.
Motivating application. For example, recent approaches for the SMT theory of regular expressions [34,50] rely on regular expression derivatives to explore the states of the finite state machine corresponding to the regex incrementally, rather than expanding all states initially, which is often prohibitively expensive. This requires solving the incremental live and dead state detection problem in the finite state machine (a directed graph). This is particularly important for regexes with intersection and complement (extended regexes [12,22,19]), which have been shown to arise natively in applications of SMT string solvers to security [2,50]. Concretely, consider the regex (.*α.^100)^C ∩ (.α), where . matches any character, ∩ is regex intersection, ^C is regex complement, and α matches any digit (0-9). A traditional solver would expand the left and right operands as state machines, but the left operand (.*α.^100)^C is astronomically large as a DFA, causing the solver to hang. The derivative-based technique instead constructs the derivative regex (.*α.^100)^C ∩ (.^100)^C ∩ α. At this stage we have a graph of two states and one edge, where the states are regexes and the edge is the derivative relation. After one more derivative operation, the regex becomes one that is clearly satisfiable as it accepts the empty string.
In order to be efficient, a derivative-based solver needs to identify satisfiable (live) and unsatisfiable (dead) regexes incrementally (as the graph is built), because it does not generally construct the entire space before terminating (see the graph update inference rule Upd, p. 626 [50]). Indeed, satisfiability (nonemptiness) for extended regexes is non-elementary, and is still PSPACE-complete for more restrictive fragments, strongly incentivizing the incremental approach. Beyond regexes, we believe that GIDs are general enough to be applicable in a range of future applications.
Prior work. Traditionally, while live state detection can be done incrementally, dead state detection is often done exhaustively (i.e., after the entire state space is explored). For example, bounded and finite-state model checkers based on translations to automata [32,48,13], as well as classical dead-state elimination algorithms [28,7,10], generally work on a fixed state space after it has been fully enumerated. However, exhaustive exploration is prohibitive for large (e.g., exponential or infinite) state spaces which arise in an SMT verification context. Moreover, exhaustive exploration may simply be unnecessary if partial information can be deduced about the states seen so far which already leads to a satisfiable or unsatisfiable result along a given solver path. We also have good evidence that incremental feedback can improve SMT solver performance: a representative success story is the e-graph data structure [54,16] for congruence closure [18,42], which maintains an equivalence relation among expressions incrementally; because it applies to general expressions, it is theory-independent and re-usable. Incremental state space exploration could lead to similar benefits if applied to SMT procedures which still rely on exhaustive search.

However, in order to perform incremental dead state detection, we currently lack algorithms which match offline performance. As we discuss in Section 2, the best-known existing solutions would require maintaining strongly connected components (SCCs) incrementally. For SCC maintenance and the related simpler problem of cycle detection, O(m^(3/2)) amortized algorithms are known for m edge additions [23,4], with some recently announced improvements [5,8]. Note that this is in sharp contrast to O(m) for the offline variants of these problems, which can be solved by breadth-first or depth-first search. More generally, research suggests that there is a computational barrier to what can be determined incrementally in the worst case [20,21,1].
This paper. To improve on prior results, our key observation is that in many applications, edges are not added adversarially, but from one state at a time as the states are explored. As a result, we know when a state will have no further outgoing edges; we may use this information to (i) provide information about dead states incrementally, rather than after the whole state space is explored; and (ii) obtain more efficient algorithms than currently exist for general graph reachability.

We introduce guided incremental digraphs (GIDs), a variation on incremental graphs. Like an incremental directed graph, a guided incremental digraph may be updated by adding new edges between states, or a state may be labeled as closed, meaning it will receive no further outgoing edges. Some states are designated as terminal, and we say that a state is live if it can reach a terminal state and dead if it will never reach a terminal state in any extension, i.e. if all reachable states from it are closed. To our knowledge, the problem of detecting dead states in such a system has not been studied by existing work in graph algorithms. Our problem can be solved through solving SCC maintenance, but not necessarily the other way around (see Proposition 1). We provide two new algorithms for dead-state detection in GIDs.
First, we show that the dead-state detection problem for GIDs can be solved in time O(m · log m) for m edge additions, within a logarithmic factor of the O(m) cost for offline search. The worst-case performance of our algorithm thus strictly improves on the O(m^(3/2)) upper bound for SCC maintenance in general incremental graphs. Our algorithm utilizes several data structures and existing results in online algorithms: in particular, Union-Find [52] and Henzinger and King's Euler Tour Trees [26]. The main idea of our algorithm is that, rather than explicitly computing the set of SCCs, for closed states we maintain a single path to a non-closed (open) state. This turns out to reduce the problem to quickly determining whether two states are currently assigned a path to the same open state. On the other hand, Euler Tour Trees can solve undirected reachability for graphs that are forests in logarithmic time; the challenge then lies in figuring out how to reduce directed connectivity in the graph of paths to undirected connectivity in an Euler Tour Trees forest. At the same time, we must maintain this structure under Union-Find state merges, in order to deal with cycles that are found in the graph.

While as theorists we would like to believe that asymptotic complexity is enough, the truth is that the use of complex data structures (1) can be prohibitively expensive in practice due to constant-time overheads, and (2) can make algorithms substantially more difficult to implement, leading practitioners to prefer simpler approaches. To address these needs, in addition to the logarithmic-time algorithm, we provide a second lazy algorithm which avoids the use of Euler Tour Trees and only uses union-find. This algorithm is based on an optimization of adding shortcut jump edges for long paths in the graph to quickly determine reachability. This approach aims to perform well in practice on typical graphs, and is evaluated in our experiments alongside the logarithmic-time algorithm, though we do not prove its asymptotic complexity.
Finally, we implement and empirically evaluate both of our algorithms for GIDs against several baselines in 5.5k lines of code in Rust [37,33]. Our evaluation focuses on the performance of the GID data structure itself, rather than its end-to-end performance in applications. To ensure an apples-to-apples comparison with existing approaches, we put particular focus on providing a directed graph data structure backend shared by all algorithms, so that the cost of graph search as well as state and edge merges is identical across algorithms. We implement two naive baselines, as well as an implementation of the state-of-the-art solution based on maintaining SCCs, BFGT [4], in our framework. To our knowledge, the latter is the first implementation of BFGT for SCC maintenance. On a collection of generated benchmark GIDs and GIDs directly pulled from the regex application, we demonstrate a substantial improvement over BFGT for both of our algorithms. For example, for larger GIDs (those with over 100K updates), we observe a 110-530x speedup over BFGT.
Our primary contributions are:
– Guided incremental digraphs (GIDs), a formalization of incremental live and dead state detection which supports labeling closed states. (Section 2)
– Two algorithms for the state classification problem in GIDs: first, an algorithm that works in amortized O(log m) time per update, improving upon the state-of-the-art amortized O(√m) per update for incremental graphs; and second, a simpler algorithm based on lazy heuristics. (Section 3)
– An open-source implementation of GIDs in Rust (https://github.com/cdstanford/gid), including an implementation of the BFGT baseline and supporting data structures for a fair comparison, and an evaluation which demonstrates that our algorithm outperforms the best known incremental SCC algorithm by two orders of magnitude for a range of GID benchmarks. (Section 4)

Following these contributions, we expand on the application of GIDs to regex solving in SMT (Section 5), survey related work (Section 6), and conclude (Section 7).
Fig. 1. [Diagram: states 1, 2, 3.] Guided incremental digraph consisting of the sequence of updates E(1, 2), E(1, 3), T(2). Terminal states are denoted with double circles. After the update T(2), states 1 and 2 are known to be live. However, for this graph state 3 is not dead, as a future edge may cause it to be live.
2 Guided Incremental Digraphs

2.1 Problem Statement

An incremental digraph is a sequence of edge updates E(u, v), where the algorithmic challenge in this context is to produce some output after each edge is received (e.g., whether or not a cycle exists). If the graph also contains updates T(u) labeling a state as terminal, then we say that a state is live if it can reach a terminal state in the current graph. In a guided incremental digraph, we also include updates C(u) labeling a state as closed, meaning that it will not receive any further outgoing edges.
Definition 1. Define a guided incremental digraph (GID) to be a sequence of updates, where each update is one of the following:
(i) a new directed edge E(u, v);
(ii) a label T(u) which indicates that u is terminal; or
(iii) a label C(u) which indicates that u is closed, i.e. no further edges will be added going out from u (or labels to u).
The guided incremental digraph is valid if the closed labels are correct: there are no instances of E(u, v) or T(u) after an update C(u). The denotation of G is the directed graph (V, E) where V is the set of all states u which have occurred in any update in the sequence, and E is the set of all (u, v) such that E(u, v) occurs in G.

An extension of a valid GID G is a valid GID G′ such that G is a prefix of G′. In a valid GID G, we say that a state u is live if there is a path from u to a terminal state in the denotation of G; and a state u is dead if it is not live in any extension of G. Notice that in a GID without any C(u) updates, no states are dead, as an edge may be added in an extension which makes them live.
We provide an example of a valid GID in Figures 1 and 2, resulting from the following sequence of updates: E(1, 2), E(1, 3), T(2), E(4, 3), E(4, 5), C(4), C(5). The states are denoted based on whether they are marked as terminal T(u) (double circle) or closed C(u); states that are not closed are denoted with dashed circles. Notice that after C(4), state 4 is marked closed but can still reach 3 and 5, so it is not dead. Additionally, notice that once 1 and 2 are live (after T(2)), further edges from them do not matter.
Fig. 2. [Diagram: states 1, 2, 3, 4, 5.] Guided incremental digraph consisting of the sequence of updates E(1, 2), E(1, 3), T(2), E(4, 3), E(4, 5), C(4), C(5). Closed states (from which future edges may not be added) are denoted with solid circles. After the update C(5) (but not earlier), state 5 is dead.
Definition 2. Given as input a valid GID, the guided incremental state classification problem is to output, in an online fashion after each update, the set of new live and new dead states. That is, output Live(u) or Dead(u) on the smallest prefix of updates such that u is live or dead on that prefix, respectively.
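To make Definition 1 concrete, the following is a minimal Rust sketch (our own illustration, not taken from the paper's accompanying implementation; the names Update and is_valid are ours) of the update alphabet together with the validity check that no E(u, v) or T(u) occurs after C(u).

// Minimal sketch (not from the paper's artifact): the update alphabet of
// Definition 1 and a validity check for the "closed labels are correct" rule.
use std::collections::HashSet;

#[derive(Debug, Clone, Copy)]
enum Update {
    Edge(u32, u32), // E(u, v)
    Terminal(u32),  // T(u)
    Closed(u32),    // C(u)
}

// A GID is valid if no E(u, _) or T(u) occurs after C(u).
fn is_valid(gid: &[Update]) -> bool {
    let mut closed: HashSet<u32> = HashSet::new();
    for upd in gid {
        match *upd {
            Update::Edge(u, _) | Update::Terminal(u) => {
                if closed.contains(&u) {
                    return false;
                }
            }
            Update::Closed(u) => {
                closed.insert(u);
            }
        }
    }
    true
}

fn main() {
    // The example sequence from Figures 1 and 2.
    use Update::*;
    let gid = [Edge(1, 2), Edge(1, 3), Terminal(2), Edge(4, 3), Edge(4, 5), Closed(4), Closed(5)];
    assert!(is_valid(&gid));
    // Adding an edge out of the closed state 4 would make the sequence invalid.
    let bad = [Closed(4), Edge(4, 3)];
    assert!(!is_valid(&bad));
    println!("validity checks passed");
}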
2.2 Existing Approaches

In many applications, one might choose to classify dead states offline, after the entire state space is enumerated. This leads to a linear-time algorithm via either DFS or BFS, but it does not solve the state classification problem (Definition 2) because it is not incremental. Naive application of this idea leads to an O(m) time algorithm per update for m updates (O(m^2) total), as we may have to search the entire graph after each update.
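For reference, here is a hedged sketch (our own illustration, not the paper's implementation) of the offline classification that such a per-update recomputation would perform: live states are those that can reach a terminal state, found by a backward search from terminal states, and dead states are those that cannot reach any terminal or open state.

// Offline classification sketch: both sets are found by BFS over reversed edges.
use std::collections::{HashMap, HashSet, VecDeque};

fn reverse_reach(rev: &HashMap<u32, Vec<u32>>, sources: &[u32]) -> HashSet<u32> {
    let mut seen: HashSet<u32> = sources.iter().copied().collect();
    let mut queue: VecDeque<u32> = sources.iter().copied().collect();
    while let Some(v) = queue.pop_front() {
        for &u in rev.get(&v).into_iter().flatten() {
            if seen.insert(u) {
                queue.push_back(u);
            }
        }
    }
    seen
}

fn classify_offline(
    states: &[u32],
    edges: &[(u32, u32)],
    terminal: &HashSet<u32>,
    closed: &HashSet<u32>,
) -> (HashSet<u32>, HashSet<u32>) {
    // Reverse adjacency lists: rev[v] = all u with an edge (u, v).
    let mut rev: HashMap<u32, Vec<u32>> = HashMap::new();
    for &(u, v) in edges {
        rev.entry(v).or_default().push(u);
    }
    // Live: backward search from terminal states.
    let term: Vec<u32> = terminal.iter().copied().collect();
    let live = reverse_reach(&rev, &term);
    // Not dead: backward search from states that are terminal or open (non-closed).
    let escape: Vec<u32> = states
        .iter()
        .copied()
        .filter(|s| terminal.contains(s) || !closed.contains(s))
        .collect();
    let not_dead = reverse_reach(&rev, &escape);
    let dead = states.iter().copied().filter(|s| !not_dead.contains(s)).collect();
    (live, dead)
}

fn main() {
    // Figure 2: after C(5), state 5 is dead; states 1 and 2 are live.
    let states = [1, 2, 3, 4, 5];
    let edges = [(1, 2), (1, 3), (4, 3), (4, 5)];
    let terminal: HashSet<u32> = [2].into_iter().collect();
    let closed: HashSet<u32> = [4, 5].into_iter().collect();
    let (live, dead) = classify_offline(&states, &edges, &terminal, &closed);
    assert!(live.contains(&1) && live.contains(&2));
    assert!(dead.contains(&5) && !dead.contains(&4) && !dead.contains(&3));
    println!("live = {:?}, dead = {:?}", live, dead);
}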
For acyclic graphs, there exists an amortized O(1)-time per update algorithm for the problem (Definition 2): maintain the graph as a list of forward and backward edges at each state. When a state v is marked terminal, do a DFS along backward edges to determine all states u that can reach v not already marked as live, and mark them live. When a state v is marked closed, visit all forward-edges from v; if all are dead, mark v as dead and recurse along all backwards edges from v. As each edge is visited only when marking a state live or dead, it is only visited a constant number of times overall (though we may use more than O(1) time on some particular update pass). Additionally, the live state detection part of this procedure still works for graphs containing cycles.
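The following is a hedged Rust sketch of this amortized O(1) scheme for the acyclic case (again our own illustration, not the paper's code): live states propagate backwards from terminal states, and a closed state is marked dead as soon as all of its forward edges lead to dead states, recursing along backward edges.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Status { Live, Dead, Unknown, Open }

#[derive(Default)]
struct AcyclicGid {
    fwd: HashMap<u32, Vec<u32>>,
    bck: HashMap<u32, Vec<u32>>,
    status: HashMap<u32, Status>,
}

impl AcyclicGid {
    fn status(&self, u: u32) -> Status {
        *self.status.get(&u).unwrap_or(&Status::Open)
    }
    fn on_edge(&mut self, u: u32, v: u32) {
        self.status.entry(u).or_insert(Status::Open);
        self.status.entry(v).or_insert(Status::Open);
        self.fwd.entry(u).or_default().push(v);
        self.bck.entry(v).or_default().push(u);
        if self.status(v) == Status::Live {
            self.mark_live(u);
        }
    }
    fn on_terminal(&mut self, u: u32) {
        self.status.entry(u).or_insert(Status::Open);
        self.mark_live(u);
    }
    fn on_closed(&mut self, u: u32) {
        if self.status(u) == Status::Open {
            self.status.insert(u, Status::Unknown);
            self.try_mark_dead(u);
        }
    }
    // Backward DFS: u and everything that reaches u becomes live.
    fn mark_live(&mut self, u: u32) {
        if self.status(u) == Status::Live { return; }
        self.status.insert(u, Status::Live);
        let preds = self.bck.get(&u).cloned().unwrap_or_default();
        for p in preds {
            self.mark_live(p);
        }
    }
    // If a closed (Unknown) state has only dead successors, it is dead;
    // then re-check its predecessors.
    fn try_mark_dead(&mut self, u: u32) {
        if self.status(u) != Status::Unknown { return; }
        let succs = self.fwd.get(&u).cloned().unwrap_or_default();
        if succs.iter().all(|&v| self.status(v) == Status::Dead) {
            self.status.insert(u, Status::Dead);
            let preds = self.bck.get(&u).cloned().unwrap_or_default();
            for p in preds {
                self.try_mark_dead(p);
            }
        }
    }
}

fn main() {
    // Figure 2 sequence: E(1,2), E(1,3), T(2), E(4,3), E(4,5), C(4), C(5).
    let mut g = AcyclicGid::default();
    g.on_edge(1, 2); g.on_edge(1, 3); g.on_terminal(2);
    g.on_edge(4, 3); g.on_edge(4, 5); g.on_closed(4); g.on_closed(5);
    assert_eq!(g.status(1), Status::Live);
    assert_eq!(g.status(2), Status::Live);
    assert_eq!(g.status(4), Status::Unknown); // still has a path to open state 3
    assert_eq!(g.status(5), Status::Dead);
    println!("ok: {:?} {:?}", g.status(4), g.status(5));
}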
The challenge, therefore, lies primarily in detecting dead states in graphs which may contain cycles. For this, the breakthrough state-of-the-art approach from [4] enables maintaining the graph as a condensed graph which is acyclic, where the vertices in the condensed graph represent strongly connected components (SCCs) of states. The mapping from states to SCCs is maintained using a Union-Find [52] data structure. Maintaining this requires O(√m) time per update. To ensure that vertices in the condensed graph correspond to SCCs in the original, we have to also make sure that closed and non-closed states are not merged into the same SCC; the easiest solution to this is to withhold all edges from each state u in the graph until u is closed, which ensures that u must be its own SCC. Once we have the condensed graph with these modifications, the same algorithm as in the previous paragraph works to identify live and dead states. Since each edge is only visited when a state is marked closed or live, each edge is visited only once throughout the algorithm, so we use only amortized O(1) additional time to calculate live and dead states. While this SCC maintenance algorithm ignores the fact that edges do not occur from closed states C(u), this still proves the following result:
Proposition 1. Guided incremental state classification reduces to SCC maintenance. That is, suppose we have an algorithm over incremental graphs that maintains the set of SCCs in O(f(m, n)) total time given n states and m edge additions, where “maintains” means that (i) we can check whether two states are in the same SCC in O(1) time, and (ii) we can iterate over all the states in an SCC, or iterate over the forward-edges or backward-edges from an SCC (to or from other SCCs, respectively) in O(1) time per edge. Then there exists an algorithm to solve guided incremental state classification in O(f(m, n)) total time.
Despite this reduction one way, there is no obvious reduction the other way, from cycle detection or SCCs to Definition 2. This is because, while the existence of a cycle of non-live states implies bi-reachability between all states in the cycle, it does not necessarily imply that all of the bi-reachable states are dead. In fact, consider a GID consisting only of E(u, v) and C(u) updates. In order to identify dead states, rather than enumerating all cycles or SCCs, it is enough to maintain a single path from each non-dead state to a non-closed state. This observation forms the starting point for our approach in the following sections, improving on the upper bound given by Proposition 1.
3 Algorithms

This section presents Algorithm 2, which solves the state classification problem in logarithmic time (Theorem 2), and Algorithm 3, an alternative lazy approach. Both algorithms are optimized versions of Algorithm 1, a first-cut algorithm which establishes the structure of our approach. We begin by establishing some basic terminology shared by all of the algorithms (see Figure 3).
States in a GID can be usefully classified as exactly one of four statuses: live, dead, unknown, or open, where an unknown state is one that is closed but not yet live or dead, and an open state is one that is not closed and not live. Note that a state may be live and neither open nor closed; this terminology keeps the classification disjoint. Pragmatically, for live states it does not matter if they are classified as open or closed, since edges from those states no longer have any effect. However, all dead and unknown states are closed, and no states are both open and closed.
Given this classification, the intuition is that for each unknown state u, we only need one path from u to an open state to prove that it is not dead; we want to maintain one such path for all unknown states. To maintain all of these paths simultaneously, we maintain an acyclic directed forest structure on unknown and open states where the roots are open states, and all non-root states have a single edge to another state, called its successor. Edges other than successor edges can be temporarily ignored, except for when marking live states; these are kept as reserve edges. Specifically, we add every edge (u, v) as a backward-edge from v (to allow propagating live states), but for edges not in the forest we keep (u, v) in a reserve list from u. We store all edges, including backward-edges, in the original order (u, v). The reserve list edge becomes relevant only when either (i) u is marked as closed, or (ii) u’s successor is marked as dead.

Live: Some reachable state from u is terminal.
Dead: All reachable states from u (including u itself) are closed and not terminal.
Unknown: u is closed, but not live or dead.
Open: u is not live and not closed.
Terminal: A state u labeled by T(u).
Closed: A state u labeled by C(u).
Canonical: A state x that is the uniquely chosen representative of its union-find equivalence class, i.e. UF.find(x) = x.
u, v, w: States in the original graph.
x, y, z: Canonical states in the condensed graph.
Successor succ(x): For an unknown, canonical state x, a uniquely chosen state v such that (x, v) is an edge, and following the path of successors leads to an open state.

Fig. 3. Top: Basic classification of GID states into four disjoint categories. Bottom: Additional terminology used in this paper.
In order to deal with cycles, we need to maintain the forest of unknown states not on the original graph, but on a union-find condensed graph, similar to [52]. When we find a cycle of unknown states, we merge all states in the cycle by calling the union method in the union-find. We refer to a state as canonical if it is the canonical representative of its equivalence class in the union-find; the condensed graph is a forest on canonical states. We use x, y, z to denote canonical states (states in the condensed graph), and u, v, w to denote the original states (not known to be canonical).

Following [52], we maintain edges as linked lists rather than sets, using the original states instead of canonical states; this is important as it allows combining edge lists in O(1) time when merging states.
3.1 First-Cut Algorithm

A first-cut algorithm based on these ideas is shown in Algorithm 1. The union-find data structure UF provides UF.union(v1, v2), UF.find(v), and UF.iter(v): UF.union merges v1 and v2 to refer to the same canonical state, UF.find returns the canonical state for v, and UF.iter iterates over states equivalent to v. The running time of each of these operations is amortized α(n) for n updates, where α(n) ∈ o(log n) is the inverse Ackermann function.
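A hedged sketch of this union-find interface (our own illustration, not the paper's implementation): find uses path compression, union is by size, and iter walks a circular list threaded through each equivalence class, which lets two classes be spliced together in O(1) on union.

struct UnionFind {
    parent: Vec<usize>,
    size: Vec<usize>,
    next: Vec<usize>, // circular list through each equivalence class
}

impl UnionFind {
    fn new(n: usize) -> Self {
        UnionFind {
            parent: (0..n).collect(),
            size: vec![1; n],
            next: (0..n).collect(),
        }
    }
    fn find(&mut self, v: usize) -> usize {
        let p = self.parent[v];
        if p == v {
            return v;
        }
        let root = self.find(p);
        self.parent[v] = root; // path compression
        root
    }
    fn union(&mut self, u: usize, v: usize) -> usize {
        let (mut a, mut b) = (self.find(u), self.find(v));
        if a == b {
            return a;
        }
        if self.size[a] < self.size[b] {
            std::mem::swap(&mut a, &mut b);
        }
        self.parent[b] = a;
        self.size[a] += self.size[b];
        self.next.swap(a, b); // splice the two circular lists in O(1)
        a
    }
    fn iter(&self, v: usize) -> Vec<usize> {
        let mut members = vec![v];
        let mut w = self.next[v];
        while w != v {
            members.push(w);
            w = self.next[w];
        }
        members
    }
}

fn main() {
    let mut uf = UnionFind::new(6);
    uf.union(1, 2);
    uf.union(2, 4);
    let x = uf.find(4);
    assert_eq!(x, uf.find(1));
    let mut class = uf.iter(x);
    class.sort();
    assert_eq!(class, vec![1, 2, 4]);
    println!("class of {} = {:?}", x, class);
}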
Algorithm 1 First-cut algorithm.

Data:
  V: a type for states (integers) (variables u, v, ...)
  E: the type of edges, equal to (V, V)
  UF: a union-find data structure over V
  X: the set of canonical states in UF (variables x, y, z, ...)
  status: a map from X to Live, Dead, Unknown, or Open
  succ: a map from X to V
  res and bck: maps from X to linked lists of E

procedure OnEdge(E(u, v))
  x ← UF.find(u); y ← UF.find(v)
  if status(y) = Live then
    OnTerminal(T(x))                       ▷ mark x and its ancestors live
  else if status(x) ≠ Live then            ▷ status(x) must be Open
    append (u, v) to res(x)
    append (u, v) to bck(y)

procedure OnTerminal(T(v))
  y ← UF.find(v)
  for all x in DFS backwards (along bck) from y not already Live do
    status(x) ← Live
    output Live(x′) for all x′ in UF.iter(x)

procedure OnClosed(C(v))
  y ← UF.find(v)
  if status(y) ≠ Open then return          ▷ y is already live or closed
  while res(y) is nonempty do
    pop (v, w) from res(y); z ← UF.find(w)
    if status(z) = Dead then continue
    else if CheckCycle(y, z) then
      for all z′ in cycle from z to y do z ← Merge(z, z′)
    else
      status(y) ← Unknown; succ(y) ← z
      return
  status(y) ← Dead; output Dead(y′) for all y′ in UF.iter(y)
  ToRecurse ← ∅
  for all (u, v) in bck(y) do
    x ← UF.find(u)
    if status(x) = Unknown and UF.find(succ(x)) = y then
      status(x) ← Open                     ▷ temporary; marked closed on recursive call
      add x to ToRecurse
  for all x in ToRecurse do OnClosed(C(x))

procedure CheckCycle(y, z) returning bool
  while status(z) = Unknown do z ← UF.find(succ(z))    ▷ get root state from z
  return y = z

procedure Merge(x, y) returning V
  z ← UF.union(x, y)
  bck(z) ← bck(x) + bck(y)                 ▷ O(1) linked list append
  res(z) ← res(x) + res(y)                 ▷ O(1) linked list append
  return z
We will only merge states if they are bi-reachable from each other, and both unknown. This implies that all states in the equivalence class of a canonical state x have the same status. We also maintain the set of canonical states in UF as a set X.

The edge maps res and bck are stored as maps from X to linked lists of edges. Each edge (u, v) is always stored using its original states (i.e., edge labels are not updated when states are merged); but we can easily obtain the corresponding edge on canonical states via (UF.find(u), UF.find(v)). On an input edge (u, v), the edge is immediately added to bck as a backward-edge from v (this is used for detecting live states), but forward-edges are only added selectively to succ when they are needed to prove unknown status, and are otherwise stored in the reserve list res.
Invariants. In total, we maintain the following invariants. For live and dead states, we no longer care about forward or backward edges from them, so there are no invariants about live and dead states other than that status(x) is Live or Dead for these states, respectively. The successor edges and no cycles invariants are about the forest structure. The last invariant, edge representation, is about making sure that all edges in the input GID are represented somehow in the current graph.
– Merge equivalence: For all states u and v, if UF.find(u) = UF.find(v), then u and v are bi-reachable and both closed. (This implies that u and v are both live, both dead, or both unknown.)
– Status correctness: For all u, status(UF.find(u)) is equal to the status of u.
– Successor edges: If x is unknown, then succ(x) is defined and is an unknown or open state. If x is open, then succ(x) is not defined.
– No cycles: There are no cycles among the set of edges (x, UF.find(succ(x))), over all unknown and open canonical states x.
– Edge representation: Lastly, for all edges (u, v) in the input GID, at least one of the following holds: (i) (u, v) is stored in res, i.e. (u, v) ∈ res(UF.find(v)); (ii) (u, v) is stored in succ, i.e. v = succ(UF.find(u)); (iii) u and v are equivalent in the condensed graph, i.e. UF.find(u) = UF.find(v); (iv) u is live; or (v) v is dead.
Proposition 2. Algorithm 1 is correct.

Proof. We need to argue that all of the invariants described above are preserved. This implies correctness of the algorithm by the status correctness invariant, since it implies that live and dead states are labeled (and output) correctly.

Upon receiving E(u, v) or T(u), this may cause some dead, unknown, or open states to change status to live, but does not change the status of any other states. As bck stores all backwards-edges (not just succ edges), live states are marked correctly. This preserves the forest invariants because if an unknown or open state is marked live, so are all its predecessors. This also preserves the edge representation invariant, either by adding new edges to case (i) and case (iv) (for E updates) or moving edges from cases (i)-(iii) to case (iv) (for both E and T updates).

The procedure C(u) is recursive; during the procedure and on recursive calls, some states are temporarily marked Open, meaning they are roots in the forest structure. During these recursive calls, we need a slightly generalized invariant: each forest root corresponds to a pending future call to OnClosed(C(u)) (i.e., an element of ToRecurse for some call on the stack) and is a state that needs to either find a successor or be marked dead. This means that the status labels for unknown- and open-labeled states are not necessarily correct during recursive calls; we are only concerned with preserving the forest structure, so that they will be correct after all calls complete. Note in particular that after merging cycles of unknown states via Merge(x, y), the new root is marked Open to preserve the generalized invariant on recursive calls.

Upon receiving C(u), without loss of generality we may assume status(u) = Open (the opposite only occurs if the input GID has duplicate C(u) updates, or if C(u) occurs for a live state). At the top of the while loop, there are three cases:
– If a reserve edge is available, and its target is dead, then we discard it. This preserves edge representation because that edge still satisfies (iv).
– If a reserve edge is available and its target is not dead, then we see if adding that edge creates a cycle by calling CheckCycle. If no, then the two trees are distinct, so we add an edge between them. This preserves edge representation by moving that edge from case (i) to case (ii). If yes, then we collapse the cycle in the forest to a single state by repeated calls to Merge(x, y). This preserves edge representation by moving the edges in the cycle from case (ii) to case (iii). The forest structure of the succ is also preserved because if a graph is a forest, then it remains a forest after merging adjacent states.
– Finally, if there are no reserve edges left, then because of edge representation, and because there is no successor from u, all edges from u must lead to dead states (case (v)) and therefore u is dead. This case splits the tree rooted at u into one tree for each of its predecessors, preserving the forest structure. Each of the succ edges from predecessors is deleted; this preserves edge representation by moving those edges from case (ii) to case (v).
In any case, the generalized invariant for recursive calls is preserved, and all dead states are labeled correctly (given the invariant), so we are done. ⊓⊔
Complexity. The core inefficiency in Algorithm 1 lies in CheckCycle. The procedure repeatedly sets z ← succ(z) to find the tree root, which in general could be linear time in the number of edges. For example, suppose we have a line graph where we add edges in a backwards fashion and close them: E(2, 1), C(2), E(3, 2), C(3), ..., E(n, n−1), C(n). In such a graph, this inefficiency results in O(m^2) work. We therefore want to improve upon this step in Algorithms 2 and 3.

All other parts of the algorithm run in amortized α(m) time per update for m updates (using vectors to represent the maps fwd, bck, and succ for O(1) lookups). The OnTerminal calls and loop iterations only run once per edge in the graph when the target of that edge is marked live or terminal. Likewise, the procedure OnClosed is called conservatively: the cost of each call can be assigned either to the target of an edge being marked dead, or to an edge being merged as part of a cycle. Both marking dead and merging can only happen once for a given edge, so this is O(1) per edge.
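For concreteness, the backward line-graph family described above can be generated by the following small snippet (our own illustration); feeding it to Algorithm 1 forces CheckCycle to walk the entire successor chain on every C(i) update.

#[derive(Debug)]
enum Update {
    Edge(u32, u32), // E(u, v)
    Closed(u32),    // C(u)
}

// Line graph added and closed in backward order: E(2,1), C(2), E(3,2), C(3), ...
fn backward_line(n: u32) -> Vec<Update> {
    let mut updates = Vec::new();
    for i in 2..=n {
        updates.push(Update::Edge(i, i - 1));
        updates.push(Update::Closed(i));
    }
    updates
}

fn main() {
    // E(2,1), C(2), E(3,2), C(3), E(4,3), C(4), E(5,4), C(5)
    for u in &backward_line(5) {
        println!("{:?}", u);
    }
}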
3.2 Logarithmic Algorithm

At its core, CheckCycle requires solving an undirected reachability problem on a graph that is restricted to a forest. However, the forest is changed not just by edge additions, but by edge additions and deletions. While undirected reachability and reachability in directed graphs are both difficult to solve incrementally, reachability in dynamic forests can be solved in O(log m) time per operation. Our algorithm uses an Euler Tour Forest data structure EF of Henzinger and King [26], and is shown in Algorithm 2.
However, this idea does not work straightforwardly, once again because of the presence of cycles in the original graph. We cannot simply store the forest as a condensed graph with edges on condensed states. As we saw in Algorithm 1, it was important to store successor edges as edges into V, rather than edges into X; this is the only way that we can merge states in O(1), without actually inspecting the edge lists. If we needed to update the forest edges to be in X, this could require O(m) work to merge two O(m)-sized edge lists, as each edge might need to be relabeled in the EF graph.

To solve this challenge, we instead store the EF data structure on the original states, rather than the condensed graph; but we ensure that each condensed state is represented by a tree of original states in the original graph. When adding edges between condensed graph states, we need to make sure to remember the original state labels (u, v), so that we can later remove the edge using the original labels (this happens when its target becomes dead). When an edge would create a cycle, we instead simply ignore it in the EF graph, because a line of connected trees forms a tree.

In summary, the algorithm borrows most data and procedures from Algorithm 1, with a few important changes: (1) we maintain the EF data structure EF, a forest on V; (2) the successor edges are stored as their original edge labels (u, v), rather than just as a target state; (3) the procedure OnClosed is updated in several locations to maintain the graph EF.
Invariants. We continue all invariants from Algorithm 1, with the small modification that the successor edges and no cycles invariants use the new succ representation: that is, they are constraints on the edges (x, UF.find(v)), where succ(x) = (u, v). In addition, we have two constraints on edges in EF, depending on whether those states are equivalent in the union-find structure. We call edges between inequivalent states inter-edges and those between equivalent states intra-edges.
– EF inter-edges: For all states u and v not in the same canonical state, (u, v) is in the EF graph if and only if (u, v) = succ(UF.find(u)) or (v, u) = succ(UF.find(v)).
– EF intra-edges: For all unknown canonical states x, the set of edges (u, v) in the EF between states belonging to x forms a tree.

Algorithm 2 Logarithmic time algorithm.

Data:
  All data from Algorithm 1
  succ: a map from X to E (instead of to V)
  EF: Euler Tour Trees data structure providing EF.add, EF.remove, and EF.connected

procedure OnEdge(E(u, v)) as in Algorithm 1
procedure Merge(x, y) returning V as in Algorithm 1

procedure OnTerminal(T(v))
  y ← UF.find(v)
  for all x in DFS backwards (along bck) from y not already Live do
    if status(x) = Unknown then
      ▷ The following line is not strictly necessary, but simplifies the analysis
      (u, v) ← succ(x); delete succ(x); EF.remove(u, v)
    status(x) ← Live; output Live(x′) for all x′ in UF.iter(x)

procedure OnClosed(C(v))
  y ← UF.find(v)
  if status(y) ≠ Open then return
  while res(y) is nonempty do
    pop (v, w) from res(y); z ← UF.find(w)
    if status(z) = Dead then continue
    else if CheckCycle(y, z) then
      for all z′ in cycle from z to y do z ← Merge(z, z′)
    else
      status(y) ← Unknown; succ(y) ← (v, w)
      EF.add(v, w)                         ▷ undirected edge; use original labels (not (x, y))
      return
  status(y) ← Dead; output Dead(y′) for all y′ in UF.iter(y)
  ToRecurse ← ∅
  for all (u, v) in bck(y) do
    x ← UF.find(u)
    if status(x) = Unknown then
      (u′, v′) ← succ(x)
      if UF.find(v′) = y then
        EF.remove(u′, v′)                  ▷ undirected edge; use original labels
        status(x) ← Open; delete succ(x); add x to ToRecurse
  for all x in ToRecurse do OnClosed(C(x))

procedure CheckCycle(y, z) returning bool
  return EF.connected(y, z)
Theorem 1. Algorithm 2 is correct.

Proof. Observe that the EF inter-edges constraint implies that EF only contains edges between unknown and open states, together with isolated trees. In the modified OnTerminal procedure, when marking states as live we remove inter-edges, so we preserve this invariant.

Next we argue that, given the invariants about EF, for an open state y the CheckCycle procedure returns true if and only if (y, z) would create a directed cycle. If there is a cycle of canonical states, then because canonical states are connected trees in EF, the cycle can be lifted to a cycle on original states, so y and z must already be connected in this cycle without the edge (y, z). Conversely, if y and z are connected in EF, then there is a path from y to z, and this can be projected to a path on canonical states. However, because y is open, it is a root in the successor forest, so any path from y along successor edges travels only on backwards edges; hence z is an ancestor of y in the directed graph, and thus (y, z) creates a directed cycle.

This leaves the OnClosed procedure. Other than the EF lines, the structure is the same as in Algorithm 1, so the previous invariants are still preserved, and it remains to check the EF invariants. When we delete the successor edge and temporarily mark status(x) = Open for recursive calls, we also remove it from EF, preserving the inter-edge invariant. Similarly, when we add a successor edge to x, we add it to EF, preserving the inter-edge invariant. So it remains to consider when the set of canonical states changes, which is when merging states in a cycle. Here, a line of canonical states is merged into a single state, and a line of connected trees is still a tree, so the intra-edge invariant still holds for the new canonical state, and we are done. ⊓⊔
Theorem 2. Algorithm 2 uses amortized logarithmic time per edge update.

Proof. By the analysis of Algorithm 1, each line of the algorithm runs in amortized O(1) time other than those in CheckCycle. The CheckCycle procedure now avoids the loop and so also runs an amortized O(1) number of times. Each line is either constant-time, α(m) = o(log n) time for the UF calls, or O(log n) time for the EF calls, so in total the algorithm runs in amortized O(log n) time per update. ⊓⊔
3.3 Lazy Algorithm

While the asymptotic complexity of log n could be the end of the story, in practice, the cost of the EF calls could be a significant overhead. The technical details of Euler Tour Trees include building an AVL-tree cycle for each tree, where the cycle contains each state of the graph and each edge in the graph twice. It turns out that adding one edge to EF results in up to eight modifications to the AVL tree: it needs to be split at the source, split at the target, then the edge needs to be added in both directions (u, v) and (v, u) to the cycle, and then these trees need to be glued together. Every one of these operations comes with a rebalancing operation which could do Ω(log n) tree rotations and pointer dereferences to visit the nodes in the AVL tree.

Algorithm 3 Lazy algorithm.

Data:
  All data from Algorithm 1
  jumps: a map from X to lists of V

procedure OnEdge(E(u, v)) as in Algorithm 1
procedure OnTerminal(T(v)) as in Algorithm 1
procedure OnClosed(C(v)) as in Algorithm 1

procedure CheckCycle(y, z) returning bool
  return y = GetRoot(z)

procedure GetRoot(z) returning V
  if status(z) = Open then return z
  if jumps(z) is empty then
    push succ(z) to jumps(z)               ▷ set 0th jump
  repeat
    pop w from jumps(z); z′ ← UF.find(w)   ▷ remove dead jumps
  until status(z′) ≠ Dead
  push z′ to jumps(z)
  result ← GetRoot(z′)
  n ← length(jumps(z)); n′ ← length(jumps(z′))
  if n ≤ n′ then
    push jumps(z′)[n − 1] to jumps(z)      ▷ set nth jump
  return result

procedure Merge(x, y) returning V
  z ← UF.union(x, y)
  bck(z) ← bck(x) + bck(y); res(z) ← res(x) + res(y)
  jumps(z) ← empty
  return z
As a result of these (constant-time) overheads, in this section we investigate a simpler, lazy algorithm which avoids Euler Tour Trees, and works directly by modifying Algorithm 1. To optimize Algorithm 1, one idea in the right direction is to store for each state a direct pointer to the root which results from repeatedly calling succ. But there are two issues with this. First, maintaining this may be difficult (when the root changes, potentially updating a linear number of root pointers). Second, the root may sometimes be marked dead, in which case we have to re-compute all the successor pointers.
Instead, we introduce a jump list from each state: intuitively, it will contain states after calling successor once, twice, four times, eight times, and so on, and will be updated at most once for every visit to the state. When a jump becomes obsolete (the target dead), we just pop off the largest jump, so we do not lose all of our work in building the list. When states are merged, we do lose our work, discarding the jump lists, and this also means the exact power-of-two distances are no longer preserved. We maintain the following additional information: for each unknown canonical state x, a nonempty list of jumps [v_0, v_1, v_2, ..., v_k], such that v_0 = succ(x), v_1 is reachable from v_0, v_2 is reachable from v_1, and so on. The algorithm is shown in Algorithm 3.

The key procedure is GetRoot(z), which is called when adding a reserve edge (y, z) to the graph. It finds the root in the successor forest when repeatedly calling successor from z. It shortcuts by using the jump list, and also updates the set of jumps to be approximately at powers of two: to do this, the nth jump from a state z is set to the (n − 1)st jump from the (n − 1)th jump from z.
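The following simplified Rust sketch (our own illustration; it ignores union-find merges and dead states) shows the jump-list shortcutting behind GetRoot on a static successor chain: repeated queries lazily extend each node's jump list so that later queries skip roughly power-of-two distances.

struct JumpForest {
    succ: Vec<Option<usize>>, // successor edge, None at a root
    jumps: Vec<Vec<usize>>,   // lazily built ancestor shortcuts
}

impl JumpForest {
    fn new(succ: Vec<Option<usize>>) -> Self {
        let n = succ.len();
        JumpForest { succ, jumps: vec![Vec::new(); n] }
    }
    fn get_root(&mut self, z: usize) -> usize {
        let next = match self.succ[z] {
            None => return z, // z is an open root
            Some(next) => next,
        };
        if self.jumps[z].is_empty() {
            self.jumps[z].push(next); // 0th jump = successor
        }
        let far = *self.jumps[z].last().unwrap();
        let root = self.get_root(far);
        // Saturate one more jump if the target's list is long enough:
        // the new n-th jump of z is the (n-1)-th jump of `far`.
        let n = self.jumps[z].len();
        if n <= self.jumps[far].len() {
            let hop = self.jumps[far][n - 1];
            self.jumps[z].push(hop);
        }
        root
    }
}

fn main() {
    // A chain 0 -> 1 -> 2 -> ... -> 9, where 9 is the open root.
    let succ: Vec<Option<usize>> = (0..10)
        .map(|i| if i < 9 { Some(i + 1) } else { None })
        .collect();
    let mut forest = JumpForest::new(succ);
    for _ in 0..4 {
        assert_eq!(forest.get_root(0), 9); // repeated queries build shortcuts
    }
    assert!(forest.jumps[0].len() > 1); // node 0 now jumps past its successor
    println!("jumps from 0: {:?}", forest.jumps[0]);
}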
Invariants. In addition to the invariants from Algorithm 1: for each unknown canonical state x, jumps(x) is a list of states v_0, v_1, v_2, ..., v_k such that:
– First jump: if the jump list is nonempty, then v_0 = succ(x).
– Reachability: v_(i+1) is reachable from v_i for all i.
– Powers of two: on the path of canonical states from v_0 to v_i, the total number of states (including all the states in each equivalence class) is at least 2^i.

The powers of two invariant is not necessary for correctness, but is the key to the efficiency of this algorithm: it ensures that the nth jump travels at least 2^n states at once in the successor forest. It follows from this that if the jump list is fully saturated for every state, querying GetRoot(z) will take logarithmic time. But because the jump lists are updated lazily (and have dead states that need to be discarded), this does not establish an asymptotic complexity for the algorithm.
Theorem 3. Algorithm 3 is correct.

Proof. We use the invariant on the jump list mentioned: that v_0 is the successor, v_1 is reachable from v_0, v_2 is reachable from v_1, and so on, using only forward edges added to the graph (not reserve edges). I.e., v_0, v_1, ... is some sublist of the states along the path from an unknown state to its root, potentially followed by some dead states. We need to argue that the subprocedure GetRoot (i) reaches the same verdict as repeatedly calling succ to find a cycle in the first-cut algorithm and (ii) preserves the jump list invariant. Popping dead states from the jump list clearly preserves the invariant, as does adding on a state along the path to the root, which is done when k′ ≥ k. Merging states preserves the jump list invariant trivially because we throw the jump list away, and marking states live preserves the jump list invariant trivially since the jump list is only maintained and used for unknown states. Finally, marking states as closed initializes the jump list correctly, assuming that the successor is calculated correctly. ⊓⊔
4 Experimental Evaluation

The primary goal of our evaluation has been to experimentally validate the performance of GIDs as a data structure in isolation, rather than their use in a particular application. Our evaluation seeks to answer the following questions:

Q1 How does our approach (Algorithms 2 and 3) compare to the state-of-the-art approach based on maintaining SCCs?
Q2 How does the performance of the studied algorithms vary when the class of input graphs changes (e.g., sparse vs. dense graphs, structured vs. random graphs)?
Q3 Finally, how do the studied algorithms perform on GIDs taken from the example application to regexes described in Section 5?
To answer Q1, we put substantial implementation effort into a common framework on which a fair comparison could be made between different approaches. To this end, we implemented GIDs as a data structure in Rust which includes a graph data structure on top of which all algorithms are built. In particular, this equalizes performance across algorithms for the following baseline operations: state and edge addition and retrieval, DFS and BFS search, edge iteration, and state merging. We chose Rust for our implementation for its performance, and because there does not appear to be an existing publicly available implementation of BFGT in any other language. The number of lines of code used to implement these various structures is summarized in Figure 4. We implement Algorithms 2 and 3 and compare them with the following baselines:
777
+ BFGT This algorithm is a careful implementation of the state-of-the-art ap-
778
+ proach based on SCC maintenance, using worst-case amortized O(√m) time
779
+ per update [4].
780
+ Simple In addition to BFGT, we provide a simpler SCC-based baseline, using
781
+ a DFS instead of using the BFGT algorithm. It updates the SCCs after each
782
+ state is marked closed by searching for all cycles from that state using a
783
+ forward-DFS. This takes Θ(m^2) in the worst case, but can be very efficient
784
+ when edges are added in a “forward-facing” topologically sorted manner, so
785
+ represents an important point of comparison.
786
+ Naive Finally, the naive algorithm is a greedy baseline that represents a worst-
787
+ case upper bound: it re-computes the entire set of dead states using a linear-
788
+ time DFS after each update. It is Θ(m^2) for m updates.
789
+ To answer Q2, first, we compiled a range of basic graph classes which are
790
+ designed to expose edge case behavior in the algorithms, as well as randomly
791
+ generated graphs. We focus on graphs with no live states, as live states are
792
+ treated similarly by all algorithms. Most of the generated graphs come in 2×2 =
793
+ 4 variants: (i) the states are either read in a forwards- or backwards- order; and
794
+ (ii) they are either dead graphs, where there are no open states at the end and
795
+ so everything gets marked dead; or unknown graphs, where there is a single
796
+ open state at the end, so most states are unknown. In the unknown case, it is
797
+ sufficient to have one open state at the end, as many open states can be reduced
798
+ to the case of a single open state where all edges point to that one. We include
799
+ GIDs from line graphs and cycle graphs (up to 100K states in multiples of 3),
800
+ as well as complete graphs, complete acyclic graphs (with only forward-edges),
801
+ and bipartite graphs (up to 1K states). These are important cases, for example,
802
+ because the reverse-order version of the line and cycle graphs is an edge case for
803
+ Simple: it often times out on these, but performs well on the forward-version of
804
+ these GIDs.
808
+ Implementation Component     LoC
+ Common Framework             1197
+ Naive Algorithm              78
+ Simple Algorithm             98
+ BFGT Algorithm               265
+ Algorithm 2 (Ours)           253
+ Algorithm 3 (Ours)           283
+ Euler Tour Trees             1510
+ Experimental Scripts         556
+ Separated Unit Tests         798
+ Util                         217
+ Other                        69
+ Total                        5324
+
+ Category   Benchmark          Source   Qty
+ Basic      Line               --       24
+            Cycle              --       24
+            Complete           --       18
+            Bipartite          --       14
+            Total              --       80
+ Random     Sparse             --       260
+            Dense              --       130
+            Total              --       390
+ Regex      RegExLib [9,49]    2061     37
+            Handwritten [50]   70       26
+            Additional         --       11
+            Total              --       74
+ Fig. 4. Left: Lines of code for each algorithm and other implementation components.
869
+ Right: Benchmark GIDs used in our evaluation. Where present, the source column
870
+ indicates the quantity prior to filtering out trivially small graphs.
+ [Figure 5 plots: cumulative number of benchmarks solved vs. time (ms) for basic, random,
+ and regex GID classes; time (ms) and average time (ms) vs. benchmark size. Series in all
+ plots: Naive, Simple, BFGT, Alg 2, Alg 3. See the caption below.]
+ Fig. 5. Evaluation results. Left: Cumulative plot showing the number of benchmarks
956
+ solved in time t or less for basic GID classes (top), randomly generated GIDs (middle),
957
+ and regex-derived GIDs (bottom). Top right: Scatter plot showing the size of each
958
+ benchmark vs time to solve. Bottom right: Average time to solve benchmarks of size
959
+ closest to s, where values of s are chosen in increments of 1/3 on a log scale.
963
+ Second, to exhibit more dynamic behavior, we generated random graphs:
964
+ sparse graphs with a fixed out-degree from each state, which can be either 1, 2, 3,
965
+ or 10 (up to 100K states); and dense graphs with a fixed probability of each edge,
966
+ which can be .01, .02, or .03 (up to 10K states). As with the basic graphs, states
967
+ are read in some order and marked closed; at most one state is left open at the
968
+ end. Each random graph is generated using 10 different random seeds.
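+ As an illustration only (these are not the authors' generation scripts, and the update
+ encoding below is an assumption), a sparse "unknown-variant" random GID of the kind just
+ described could be produced along the following lines:
+ import random
+
+ def sparse_gid_updates(n, out_degree, seed, order="forward"):
+     """Emit (edge / close) updates for n states with a fixed out-degree;
+     every state except the last is closed, so the last one stays open."""
+     rng = random.Random(seed)
+     states = list(range(n)) if order == "forward" else list(range(n - 1, -1, -1))
+     updates = []
+     for v in states[:-1]:
+         for _ in range(out_degree):
+             updates.append(("E", v, rng.randrange(n)))   # add a random edge from v
+         updates.append(("C", v))                         # mark v closed
+     return updates                                       # states[-1] is left open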
969
+ To answer Q3, we needed to isolate the performance of the GID data struc-
970
+ ture itself, rather than the performance of the other components of the regex
971
+ SMT solver. Prior work on regex solving integrated with Z3 [50] implements
972
+ an earlier version of GIDs, which is essentially the Simple algorithm in C++.
973
+ However, preliminary tests suggest that on some examples, GIDs are already
974
+ quite efficient and contribute only a fraction of the performance (1-10%), as
975
+ a substantial amount of time is spent manipulating and rewriting expressions
976
+ and computing derivatives. While we would expect GIDs to be the bottleneck on
977
+ larger examples where the asymptotic complexity of Simple becomes prohibitive,
978
+ this fact makes it difficult to isolate the performance of the GID data structure
979
+ itself, which is the focus of this paper.
980
+ To isolate the GID performance, we therefore instrumented the modified Z3
981
+ solver code to export the (incremental) sequence of graph updates that would
982
+ be performed during a run of Z3 on existing regex benchmarks. For each regex
983
+ benchmark, this instrumented code produces a faithful representation of the se-
984
+ quence of graph updates that would occur in a run of the SMT solver on this
985
+ particular benchmark. It is also important to use ERE benchmarks, rather than
986
+ plain regular expressions as these are the ones for which dead state detection is
987
+ relevant (see Section 5). For each regex benchmark, we thus get a GID bench-
988
+ mark for the present paper. In our regex-derived GID examples, we include the
989
+ RegExLib benchmarks from [9,49] and the handcrafted Boolean benchmarks re-
990
+ ported in [50]. We add to these 11 additional examples designed to stress test the
991
+ GID side of a regex solver. The collection of regex SMT benchmarks is available
992
+ on GitHub4.
993
+ From both the Q2 and Q3 benchmarks, we filter out those that are too easy:
994
+ any benchmark which takes under 10 milliseconds for all of the algorithms to
995
+ solve (including Naive). We enforce a timeout of 60 seconds. The evaluation was
996
+ run on a 2020 MacBook Air running MacOS Monterey, containing an Apple M1
997
+ processor and 8GB of memory.
998
+ Correctness. To ensure that all of our implementations are correct, we also
999
+ invested time into unit testing and checked output correctness on all of our
1000
+ collected benchmarks, including several cases which exposed bugs in previous
1001
+ versions of one or more algorithms. In total, all algorithms are vetted against 25
1002
+ unit tests from handwritten edge cases that exposed prior bugs, 373 unit tests
1003
+ from benchmarks, and 30 module-level unit tests for specific functions.
1004
+ Results. Figure 5 shows the results. Algorithm 3 shows significant improve-
1005
+ ments over the state-of-the-art, solving more benchmarks in a smaller amount
1006
+ 4 https://github.com/cdstanford/regex-smt-benchmarks
1010
+ of time across basic GIDs, random GIDs, and regex GIDs. Algorithm 2 also
1011
+ shows state-of-the-art performance, similar to BFGT on basic and regex GIDs
1012
+ and significantly better on random GIDs. On the bottom right, since looking at
1013
+ average time is not meaningful for benchmarks of widely varying size, we stratify
1014
+ the size of benchmarks into buckets, and plot time-to-solve as a function of size.
1015
+ Note that as both x-axis and y-axis are on a log scale, a total running time of
1016
+ O(mp) for m updates corresponds to a line with slope p. The plot shows that
1017
+ Algorithm 3 exhibits up to two orders of magnitude speedup over BFGT for
1018
+ larger GIDs – we see speedups of 110x to 530x for GIDs in the top five size
1019
+ buckets (GIDs of size nearest to 100K, 200K, 500K, 1M, and 2M).
1020
+ New implementations of existing work. Our implementation contributes, to our
1021
+ knowledge, the first implementation of BFGT for SCC maintenance. In addition,
1022
+ it is one of the first implementations of Euler Tour Trees (EF) for undirected
1023
+ reachability in forests, including the AVL tree backing for EF which maintains
1024
+ a disjoint collection of lists of state and edge identifiers, and the first implemen-
1025
+ tation in Rust.
1026
+ 5
1027
+ Application to Extended Regular Expressions
1028
+ In this section, we describe one application of GIDs in the context of deciding
1029
+ regex constraints in SMT. In particular, we explain how precisely the GID state
1030
+ classification problem arises in the context of derivative-based solvers [50,34].
1031
+ We first define extended regexes (regexes extended with intersection & and
1032
+ complement ~) modulo a (symbolic) alphabet A that intuitively provides char-
1033
+ acter classes as predicates that are closed under Boolean operations. We explain
1034
+ the main idea behind (symbolic) derivatives [50] that provides the foundation
1035
+ for incrementally creating a GID. Then we show, through an example, how a
1036
+ solver can incrementally expand derivatives to reduce the satisfiability problem
1037
+ to the GID state classification problem (Definition 2).
1038
+ EREs or regexes are defined as follows where ϕ is a predicate in A.
1039
+ RE ::= ϕ | ε | RE1 · RE2 | RE* | RE1 | RE2 | RE1 & RE2 | ~RE
1040
+ Let Rk represent the concatenation of R k times. A regex R is nullable if it
1041
+ matches the empty string. Nullability is defined inductively over the structure
1042
+ of regexes with ε and R* being nullable by definition. Observe in particular that
1043
+ ~R is nullable iff R is not nullable, and R1 & R2 is nullable iff both R1 and R2
1044
+ are nullable. Terminal states are precisely the nullable regexes.
1045
+ The false predicate ⊥ of A serves as the regex that matches nothing and is
1046
+ a trivially dead state. Thus ~⊥ is equivalent to ⊤* that matches everything and
+ is a trivially live state, where ⊤ is the true predicate of A.
1048
+ A symbolic derivative δ(R) of a regex R is a binary tree or term t whose
1049
+ leaves are regexes and internal nodes are labeled by predicates from A – the two
1050
+ immediate subtrees t1 and t2 of a node with label ϕ, denoted by (ϕ ? t1 : t2),
1051
+ correspond to ϕ being true in t1 and false in t2. A branch with the accumulated
1055
+ branch condition ψ from the root of t to one of its leaves R′ then defines a
+ transition R --ψ--> R′ – provided ψ is satisfiable in A – and the edge (R, R′) is added
1058
+ to the GID where the actual predicate ψ is no longer needed because the edge
1059
+ itself represents its feasibility. The number of leaves of δ(R) is the out-degree
1060
+ deg+(R) of R that is independent of the actual size of the concrete alphabet.
1061
+ Thus R becomes closed when deg+(R) many edges have been added from R.
1062
+ The formal definition of a symbolic derivative is as follows where the op-
1063
+ erations ⃝| , ⃝& , ⃝~ and ⃝· are here for the purposes of this paper and w.l.o.g.
1064
+ normalizing variants of | , & , ~ and ·, by distributing the operations into if-then-
1065
+ elses and build a single binary tree as a nested if-then-else as a result (see [50]
1066
+ for details):
1067
+ δ(ε) = δ(⊥) = ⊥
1068
+ δ(ϕ) = (ϕ ? ε : ⊥)
1069
+ δ(R∗) = δ(R) ⃝· R∗
1070
+ δ(R | R′) = δ(R) ⃝| δ(R′)
1071
+ δ(R & R′) = δ(R) ⃝& δ(R′)
1072
+ δ(~R) = ⃝~ δ(R)
1073
+ δ(R · R′) = if Nullable(R) then (δ(R) ⃝· R′) ⃝| δ(R′) else δ(R) ⃝· R′
1074
+ If c is a term of type character then δc(R) is obtained from δ(R) by instantiating
1075
+ all internal nodes (conditions of the if-then-elses) ϕ by tests c ∈ ϕ. This means
1076
+ that in the context of SMT, δc(R) is a term of type regex, while δ(R) represents
1077
+ the lambda-term λc.δc(R) that is constructed independently of c.
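+ For intuition only, the following Python sketch implements a plain character-level
+ (Brzozowski-style) derivative and nullability check over a concrete alphabet, mirroring the
+ rules above. It is not the paper's implementation: the δ used in the paper is symbolic and
+ character-independent, whereas this simplified version fixes a character c.
+ BOT = ("pred", frozenset())          # matches nothing (the false predicate)
+
+ def nullable(r):
+     tag = r[0]
+     if tag == "eps":  return True
+     if tag == "pred": return False
+     if tag == "cat":  return nullable(r[1]) and nullable(r[2])
+     if tag == "star": return True
+     if tag == "or":   return nullable(r[1]) or nullable(r[2])
+     if tag == "and":  return nullable(r[1]) and nullable(r[2])
+     if tag == "not":  return not nullable(r[1])
+
+ def deriv(c, r):
+     """Derivative of regex r with respect to character c."""
+     tag = r[0]
+     if tag == "eps":  return BOT
+     if tag == "pred": return ("eps",) if c in r[1] else BOT
+     if tag == "cat":
+         d = ("cat", deriv(c, r[1]), r[2])
+         return ("or", d, deriv(c, r[2])) if nullable(r[1]) else d
+     if tag == "star": return ("cat", deriv(c, r[1]), r)
+     if tag == "or":   return ("or", deriv(c, r[1]), deriv(c, r[2]))
+     if tag == "and":  return ("and", deriv(c, r[1]), deriv(c, r[2]))
+     if tag == "not":  return ("not", deriv(c, r[1]))
+
+ def matches(r, s):
+     for c in s:
+         r = deriv(c, r)
+     return nullable(r)
+
+ # Example: two digit characters in a row (cf. the witness "42" in Section 5.1).
+ DIGITS = frozenset("0123456789")
+ assert matches(("cat", ("pred", DIGITS), ("pred", DIGITS)), "42")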
1078
+ Concretely, to apply Definition 1 to regexes, states will be regexes, and edges
1079
+ will represent transitions to their derivatives. A live state here is thus a regex
1080
+ that reaches a nullable regex via 0 or more edges. This implies that there exists
1081
+ a concrete string matching it. Conversely dead states are always empty, i.e. they
1082
+ match no strings, but can reach other dead states, creating strongly connected
1083
+ components of closed states none of which are live.
1084
+ 5.1
1085
+ Reduction from Incremental Regex Emptiness to GIDs
1086
+ For a solver based on derivatives of EREs, suppose we want to determine the
1087
+ satisfiability of a single regex constraint s ∈ R, where s is a string variable and
1088
+ R is a concrete regex. E.g., let L = ~(⊤*α⊤100) and R = L & (⊤α) where α is some
1089
+ character predicate in A s.t. both α and ¬α are satisfiable, e.g., take α to mean
1090
+ “is digit” to be concrete.5 The solver manipulates regex membership constraints
1091
+ on strings by unfolding them [50]. The constraint s ∈ R, that essentially tests
1092
+ nonemptiness of R with s as a witness, becomes
1093
+ (s = ϵ ∧ Nullable(R)) ∨ (s ̸= ϵ ∧ s1.. ∈ δs0(R))
1094
+ where, s ̸= ϵ since R is not nullable, si.. is the suffix of s from index i, and
1095
+ δ(R) = δ(L) ⃝& δ(⊤α) = (α ? L & ~(⊤100) : L) ⃝& α = (α ? L & ~(⊤100) & α : L & α)
+ Let R1 = L & ~(⊤100) & α and R2 = L & α. So R has two outgoing transitions
+ R --α--> R1 and R --¬α--> R2 that contribute the edges (R, R1) and (R, R2) into the
1101
+ GID. Note that these edges depend only on R and not on s0.
1102
+ 5 Using standard notations: \d denotes all digits, and [^\d] denotes all non-digits.
1106
+ We now continue the search incrementally by checking the two branches of
1107
+ the if-then-else constraint, where R1 and R2 are again both not nullable (so
1108
+ s1.. ̸= ϵ):
1109
+ s0 ∈ α ∧ s2.. ∈ δs1(R1)
1110
+
1111
+ s0 ∈ ¬α ∧ s2.. ∈ δs1(R2)
1112
+ δ(R1) = (α ? L & ~(⊤100) & ~(⊤99) : L & ~(⊤99)) ⃝& (α ? ε : ⊥) = (α ? ε : ⊥)
+ δ(R2) = (α ? L & ~(⊤100) : L) ⃝& (α ? ε : ⊥) = (α ? ε : ⊥)
1114
+ It follows that R1 --α--> ε and R2 --α--> ε, so the edges (R1, ε) and (R2, ε) are added to
1117
+ the GID where ϵ is a trivial terminal state. In fact, after R1 the search already
1118
+ terminates because we then have the path (R, R1)(R1, ϵ) that implies that R is
1119
+ live. The associated constraints s0 ∈ α and s1 ∈ α and the final constraint that
1120
+ s2.. = ϵ can be used to extract a concrete witness, e.g., s = "42". From the point
1121
+ of view of deciding nonemptiness the role of the witness s is immaterial, and
1122
+ can be omitted, as it poses no additional constraints on its own if s is a variable
1123
+ (a.k.a., an uninterpreted constant in SMT).
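+ Schematically, and only as a hedged sketch (a real solver works with the symbolic δ, adds
+ edges to the GID, and queries live/dead states incrementally), the nonemptiness test amounts
+ to searching the derivative graph for a nullable regex, assuming regexes are represented by
+ hashable, normalized values so that only finitely many derivatives arise:
+ def nonempty(R, alphabet, deriv, nullable):
+     """R is nonempty iff some regex reachable from R via derivatives is nullable."""
+     seen, stack = {R}, [R]
+     while stack:
+         r = stack.pop()
+         if nullable(r):
+             return True
+         for c in alphabet:            # in the paper, one edge per feasible branch of δ(r)
+             d = deriv(c, r)
+             if d not in seen:
+                 seen.add(d)
+                 stack.append(d)
+     return False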
1124
+ Soundness of the algorithm follows from the fact that if R is nonempty, i.e., s ∈ R
1125
+ is satisfiable, then by expanding the search, we eventually arrive at a nullable
1126
+ (terminal) regex, as in the example run above. To achieve completeness – and
1127
+ to eliminate dead states as early as possible – we incrementally construct a
1128
+ GID corresponding to the set of regexes seen so far (as above). After all the
1129
+ feasible transitions from R to its derivatives in δ(R) are added to the GID as
1130
+ edges (w.l.o.g. in one batch), the state R becomes closed. Crucially, due to the
1131
+ symbolic form of δ(R), no derivative is missing. Therefore R is known to be
1132
+ empty precisely as soon as R is detected as a dead state in the GID. We get the
1133
+ following theorem that uses finiteness of the closure of symbolic derivatives [50,
1134
+ Theorem 7.1]:
1135
+ Theorem 4. For any regex R, (1) If R is nonempty, then the decision procedure
1136
+ eventually marks R live. If n is the shortest distance from R to a terminal state,
1137
+ then a concrete witness of length n can be generated. (2) If R is empty, then the
1138
+ decision procedure marks R dead after a finite number of steps and terminates.
1139
+ Observe that any number of simultaneous regex constraints for a string s can be
1140
+ combined into a single regex constraint by using the Boolean operations of ERE.
1141
+ More crucially, the algorithm is independent of the size of the universe of A,
1142
+ that may be very large, like the Unicode character set, or even infinite.
1143
+ 6
1144
+ Related Work
1145
+ Online graph algorithms: Online graph algorithms are typically divided into
1146
+ problems over incremental graphs (where edges are added), decremental graphs
1147
+ (where edges are deleted), and dynamic graphs (where edges are both added
1148
+ and deleted), with core data structures discussed in [38]. For dynamic directed
1149
+ graphs, important problems include transitive closure, cycle detection, topological
1150
+ ordering, and strongly connected component (SCC) maintenance. Maintaining
1154
+ a topological order of dynamic DAGs is studied in [35]. An online algorithm
1155
+ for topological sorting that is experimentally shown to be preferable for sparse
1156
+ graphs is discussed in [46], and in a related article [45] it is also discussed how
1157
+ to extend such an algorithm to detect strongly connected components. The key
1158
+ applications of such algorithms have traditionally been in pointer analysis [44].
1159
+ Topological order of incremental DAGs is studied in [24], presenting two different
1160
+ algorithms, one for sparse graphs and one for dense graphs – the algorithms
1161
+ are also extended to work with SCCs. The sparse algorithm was subsequently
1162
+ simplified in [4] and is the basis of our implementation named BFGT in the
1163
+ Evaluation section. A unified approach of several algorithms based on [4] is
1164
+ presented in
1165
+ [14] that uses a notion of weak topological order and a labeling
1166
+ technique that estimates transitive closure size. Further extensions of [4] are
1167
+ studied in [6,8] based on randomization. Transitive closure of decremental DAGs
1168
+ is studied in [47] that improve upon some algorithms presented earlier in [25].
1169
+ Although we have not looked at decremental graphs in this paper, the problem
1170
+ may become relevant in the context of backtracking in an SMT string theory solver
1171
+ during proof search in the presence of non-ground regexes, where properties such
1172
+ as nullability may be theory-dependent and thus affect liveness of states.
1173
+ Data structures for SMT: UnionFind [52] is a frequently used data structure
1174
+ that we also rely on in our algorithm. E-graphs [54,16,18,42] are used to ensure
1175
+ functional extensionality, where two expressions are equivalent if their subex-
1176
+ pressions are equivalent. In both cases the maintained relation is an equivalence
1177
+ relation. In contrast, maintaining live and dead states involves tracking reacha-
1178
+ bility rather than equivalence. While reachability is a basic problem in many core
1179
+ SMT algorithms, to the best of our knowledge, the formulation of the problem
1180
+ we consider here is new.
1181
+ Dead state elimination: Dead state elimination for automata, also known as
1182
+ trimming [7], is a specialized version of useless symbol elimination from context-
1183
+ free grammars [28, pp 88-89]. It plays an important role in minimization of
1184
+ automata [31,40,30,29]. The classical minimization algorithms have been studied
1185
+ extensively, e.g., in [10,7]. Brzozowski [11] observed that a DFA can be minimized
1186
+ by determinizing the reversal of the determinization of the reversal of the DFA.
1187
+ Regular expression matching and analysis: Decision problems for regexes are
1188
+ studied in the context of Satisfiability Modulo Theories (SMT), where the first-
1189
+ order objects are strings and formulas consist of string constraints such as x = yz
1190
+ or x ∈ R, where R is a regex. Over such theories, the problem of nonemptiness
1191
+ of a regex reduces to live state detection in the automaton (or set of derivatives)
1192
+ for R. String and regex constraints have been the focus of both SMT and the
1193
+ Constraint Programming (CP) solving communities, with a wide range of tools
1194
+ focusing on different problem classes and applications [27]. Regexes are now a
1195
+ standard part of the SMTLIB2 format [53]. In SMT, string solvers are integrated
1196
+ in the CDCL(T) architecture [43]. In the CP community, the MiniZinc format
1197
+ integrates membership constraints over regular languages presented as either
1201
+ DFAs or NFAs [39]. The solvers presented in [34] and [50] are based on computing
1202
+ Antimirov derivatives – a state graph of regexes is used in [50] to terminate search
1203
+ from dead state regular expressions that arise in ERE constraints, which helps
1204
+ the solver terminate search from goals that would otherwise not terminate.
1205
+ 7
1206
+ Conclusion
1207
+ GIDs are a form of incremental abstract transition system in which states are
1208
+ closed when they will receive no further outgoing edges; Algorithm 2 and Al-
1209
+ gorithm 3 solve the incremental live and dead detection problem in GIDs. The
1210
+ former runs in logarithmic worst-case time, and both algorithms achieve orders
1211
+ of magnitude speedup over the state-of-the-art based on maintaining strong con-
1212
+ nected components. Our implementation is open-source and publicly available6.
1213
+ Acknowledgments
1214
+ We would like to thank the anonymous reviewers of CAV 2021 and TACAS 2022
1215
+ for feedback leading to substantial improvements in both the paper and results.
1216
+ We also extend a special thanks to Nikolaj Bjørner, for his collaboration and
1217
+ involvement with the Z3 implementation, and Yu Chen, for helpful discussions
1218
+ in which he proposed the idea for the first-cut algorithm.
1219
+ References
1220
+ 1. Abboud, A., Williams, V.V.: Popular conjectures imply strong lower bounds for
1221
+ dynamic problems. In: 2014 IEEE 55th Annual Symposium on Foundations of
1222
+ Computer Science. pp. 434–443. IEEE (2014)
1223
+ 2. Backes, J., Bolignano, P., Cook, B., Dodge, C., Gacek, A., Luckow, K.S., Rungta, N.,
+ Tkachuk, O., Varming, C.: Semantic-based automated reasoning for AWS access policies
+ using SMT. In: Bjørner, N., Gurfinkel, A. (eds.) 2018 Formal Methods in Computer Aided
+ Design, FMCAD 2018, Austin, TX, USA, October 30 - November 2, 2018. pp. 1–9. IEEE
+ (2018). https://doi.org/10.23919/FMCAD.2018.8602994
1261
+ 3. Barrett, C., Conway, C.L., Deters, M., Hadarean, L., Jovanovi´c, D., King, T.,
1262
+ Reynolds, A., Tinelli, C.: Cvc4. In: International Conference on Computer Aided
1263
+ Verification. pp. 171–177. Springer (2011)
1264
+ 4. Bender, M.A., Fineman, J.T., Gilbert, S., Tarjan, R.E.: A new approach to incre-
1265
+ mental cycle detection and related problems. ACM Transactions on Algorithms
1266
+ 12(2), 14:1–14:22 (Dec 2015). https://doi.org/10.1145/2756553, http://arxiv.
1267
+ org/abs/1112.0784
1268
+ 5. Bernstein, A., Chechik, S.: Incremental topological sort and cycle detection in ex-
1269
+ pected total time. In: Proceedings of the Twenty-Ninth Annual ACM-SIAM Sym-
1270
+ posium on Discrete Algorithms. pp. 21–34. SIAM (2018)
1271
+ 6 https://github.com/cdstanford/gid
1275
+ 6. Bernstein, A., Chechik, S.: Incremental topological sort and cycle detection in
1276
+ o(m∗sqrt(n)) expected total time. In: Proceedings of the 29th Annual ACM-SIAM
1277
+ Symposium on Discrete Algorithms. pp. 21–34. SODA’18, Society for Industrial
1278
+ and Applied Mathematics (2018)
1279
+ 7. Berstel, J., Boasson, L., Carton, O., Fagnot, I.: Minimization of automata (2011),
1280
+ handbook of Automata
1281
+ 8. Bhattacharya, S., Kulkarni, J.: An improved algorithm for incremental cycle detec-
1282
+ tion and topological ordering in sparse graphs. In: Proceedings of the Fourteenth
1283
+ Annual ACM-SIAM Symposium on Discrete Algorithms. pp. 2509–2521. SIAM
1284
+ (2020)
1285
+ 9. Bjørner, N., Ganesh, V., Michel, R., Veanes, M.: An SMT-LIB format for sequences
1286
+ and regular expressions. In: Fontaine, P., Goel, A. (eds.) SMT’12. pp. 76–86 (2012)
1287
+ 10. Blum, N.: An 0(n log n) implementation of the standard method for minimizing
1288
+ n-state finite automata. Information Processing Letters 57, 65–69 (1996)
1289
+ 11. Brzozowski, J.A.: Canonical regular expressions and minimal state graphs for def-
1290
+ inite events. In: Proc. Sympos. Math. Theory of Automata. pp. 529—-561. New
1291
+ York (1963)
1292
+ 12. Caron, P., Champarnaud, J.M., Mignot, L.: Partial derivatives of an extended
1293
+ regular expression. In: Language and Automata Theory and Applications, LATA
1294
+ 2011. LNCS, vol. 6638, pp. 179–191. Springer (2011)
1295
+ 13. Clarke, E., Grumberg, O., Hamaguchi, K.: Another look at LTL model checking. In:
1296
+ International Conference on Computer Aided Verification. pp. 415–427. Springer
1297
+ (1994)
1298
+ 14. Cohen, E., Fiat, A., Kaplan, H., Roditty, L.: A Labeling Approach to Incremental
1299
+ Cycle Detection. arXiv e-prints (Oct 2013)
1300
+ 15. CVC4: (2020), https://github.com/CVC4/CVC4
1301
+ 16. De Moura, L., Bjørner, N.: Efficient e-matching for smt solvers. In: International
1302
+ Conference on Automated Deduction. pp. 183–198. Springer (2007)
1303
+ 17. De Moura, L., Bjørner, N.: Satisfiability modulo theories: introduction and appli-
1304
+ cations. Communications of the ACM 54(9), 69–77 (2011)
1305
+ 18. Downey, P.J., Sethi, R., Tarjan, R.E.: Variations on the common subexpression
1306
+ problem. Journal of the ACM (JACM) 27(4), 758–771 (1980)
1307
+ 19. Ellul, K., Krawetz, B., Shallit, J., Wang, M.W.: Regular expressions: New results
1308
+ and open problems. J. Autom. Lang. Comb. 10(4), 407–437 (2005)
1309
+ 20. Eppstein, D., Galil, Z., Italiano, G.F.: Dynamic graph algorithms. Algorithms and
1310
+ theory of computation handbook 1, 9–1 (1999)
1311
+ 21. Fan, W., Hu, C., Tian, C.: Incremental graph computations: Doable and undoable.
1312
+ In: Proceedings of the 2017 ACM International Conference on Management of
1313
+ Data. pp. 155–169 (2017)
1314
+ 22. Gelade, W., Neven, F.: Succinctness of the complement and intersection of regular
1315
+ expressions. arXiv preprint arXiv:0802.2869 (2008)
1316
+ 23. Haeupler, B., Kavitha, T., Mathew, R., Sen, S., Tarjan, R.E.: Incremental cycle
1317
+ detection, topological ordering, and strong component maintenance. ACM Trans-
1318
+ actions on Algorithms (TALG) 8(1), 1–33 (2012)
1319
+ 24. Haeupler, B., Kavitha, T., Mathew, R., Sen, S., Tarjan, R.E.: Incremental cycle detection,
+ topological ordering, and strong component maintenance. ACM Transactions on Algorithms
+ 8(1.3), 1–33 (January 2012). https://doi.org/10.1145/2071379.2071382
1341
+ 25. Henzinger, M., King, V.: Fully dynamic biconnectivity and transitive closure. In:
1342
+ Proceedings of the 36th Annual Symposium on Foundations of Computer Science.
1343
+ pp. 664–672. Milwaukee, WI (1995)
1347
+ 26. Henzinger, M.R., King, V.: Randomized fully dynamic graph algorithms with poly-
1348
+ logarithmic time per operation. Journal of the ACM (JACM) 46(4), 502–516 (1999)
1349
+ 27. Hojjat, H., R¨ummer, P., Shamakhi, A.: On strings in software model checking. In:
1350
+ Lin, A. (ed.) APLAS. LNCS, vol. 11893. Springer (2019)
1351
+ 28. Hopcroft, J.E., Ullman, J.D.: Introduction to Automata Theory, Languages, and
1352
+ Computation. Addison Wesley (1979)
1353
+ 29. Hopcroft, J.: An n log n algorithm for minimizing states in a finite automaton. In:
1354
+ Kohavi, Z. (ed.) Theory of machines and computations, Proc. Internat. Sympos.,
1355
+ Technion, Haifa, 1971. pp. 189–196. Academic Press, New York (1971)
1356
+ 30. Hopcroft, J.E., Ullman, J.D.: Formal languages and their relation to automata.
1357
+ Addison-Wesley Longman Publishing Co., Inc., Boston, MA, USA (1969)
1358
+ 31. Huffman, D.: The synthesis of sequential switching circuits. Journal of the Franklin
1359
+ Institute 257(3–4), 161–190,275–303 (1954)
1360
+ 32. Kupferman, O., Vardi, M.Y.: Model checking of safety properties. Formal Methods
1361
+ in System Design 19(3), 291–314 (2001)
1362
+ 33. The Rust programming language (2020), https://www.rust-lang.org/
1363
+ 34. Liang, T., Tsiskaridze, N., Reynolds, A., Tinelli, C., Barrett, C.: A decision pro-
1364
+ cedure for regular membership and length constraints over unbounded strings?
1365
+ In: FroCoS 2015: Frontiers of Combining Systems. LNCS, vol. 9322, pp. 135–150.
1366
+ Springer (2015)
1367
+ 35. Marchetti-Spaccamela, A., Nanni, U., Rohnert, H.: Maintaining a topologi-
1368
+ cal order under edge insertions. Information Processing Letters 59(1), 53–58
1369
+ (1996). https://doi.org/https://doi.org/10.1016/0020-0190(96)00075-0, https://
1370
+ www.sciencedirect.com/science/article/pii/0020019096000750
1371
+ 36. Marques-Silva, J., Lynce, I., Malik, S.: Conflict-driven clause learning sat solvers.
1372
+ In: Handbook of satisfiability, pp. 131–153. ios Press (2009)
1373
+ 37. Matsakis, N.D., Klock, F.S.: The rust language. ACM SIGAda Ada Letters 34(3),
1374
+ 103–104 (2014)
1375
+ 38. Mehlhorn, K.: Data Structures and Algorithms, Graph Algorithms and NP-
1376
+ Completeness, vol. 2. Springer (1984)
1377
+ 39. MiniZinc: https://www.minizinc.org (2020)
1378
+ 40. Moore, E.F.: Gedanken-experiments on sequential machines. Automata studies,
1379
+ Annals of mathematics studies pp. 129––153 (1956)
1380
+ 41. de Moura, L., Bjørner, N.: Z3: An Efficient SMT Solver. In: TACAS’08. pp. 337–
1381
+ 340. LNCS, Springer (2008)
1382
+ 42. Nelson, G., Oppen, D.C.: Fast decision procedures based on congruence closure.
1383
+ Journal of the ACM (JACM) 27(2), 356–364 (1980)
1384
+ 43. Nieuwenhuis, R., Oliveras, A., Tinelli, C.: Solving SAT and SAT modulo theories:
1385
+ From an abstract davis–putnam–logemann–loveland procedure to dpll(T). J. ACM
1386
+ 53(6), 937–977 (2006). https://doi.org/10.1145/1217856.1217859, https://doi.
1387
+ org/10.1145/1217856.1217859
1388
+ 44. Pearce, D.J.: Some directed graph algorithms and their application to pointer
1389
+ analysis. Ph.D. thesis, Imperial College, London (2005)
1390
+ 45. Pearce, D.J., Kelly, P.H.J.: A dynamic algorithm for topologically sorting directed
1391
+ acyclic graphs. In: Proceedings of the Workshop on Efficient and experimental
1392
+ Algorithms (WEA). LNCS, vol. 3059, pp. 383–398. Springer (2004)
1393
+ 46. Pearce, D.J., Kelly, P.H.J.: A dynamic topological sort algorithm for directed
1394
+ acyclic graphs. ACM Journal of Experimental Algorithmics 11(1.7), 1–24 (2006)
1395
+ 47. Roditty, L., Zwick, U.: Improved dynamic reachability algorithms for directed graphs.
+ SIAM Journal on Computing 37(5), 1455–1471 (2008). https://doi.org/10.1137/060650271
1409
+ 48. Rozier, K.Y., Vardi, M.Y.: LTL satisfiability checking. In: International SPIN
1410
+ Workshop on Model Checking of Software. pp. 149–167. Springer (2007)
1411
+ 49. SMT: (2012), https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/nbjorner-microsoft.automata.smtbenchmarks.zip
1415
+ 50. Stanford, C., Veanes, M., Bjørner, N.: Symbolic Boolean derivatives for efficiently
1416
+ solving extended regular expression constraints. In: Proceedings of the 42nd ACM
1417
+ SIGPLAN International Conference on Programming Language Design and Imple-
1418
+ mentation. pp. 620–635 (2021)
1419
+ 51. Stockmeyer, L.J., Meyer, A.R.: Word problems requiring exponential time (pre-
1420
+ liminary report). In: Proceedings of the fifth annual ACM symposium on Theory
1421
+ of computing. pp. 1–9 (1973)
1422
+ 52. Tarjan, R.E.: Efficiency of a good but not linear set union algorithm. JACM 22,
1423
+ 215–225 (1975)
1424
+ 53. Tinelli, C., Barrett, C., Fontaine, P.: (2020), http://smtlib.cs.uiowa.edu/theories-
1425
+ UnicodeStrings.shtml
1426
+ 54. Willsey, M., Nandi, C., Wang, Y.R., Flatt, O., Tatlock, Z., Panchekha, P.: egg:
1427
+ fast and extensible equality saturation. Proceedings of the ACM on Programming
1428
+ Languages 5(POPL), 1–29 (2021)
1429
+ 55. Z3: (2020), http://research.microsoft.com/projects/z3
1430
+ A
1431
+ Appendix
1432
+ A.1
1433
+ Extension to the Framework: Non-Reachable Updates
1434
+ When GIDs are viewed as an abstract data type, other application-specific
1435
+ heuristics can be incorporated. To illustrate this point, we consider another piece
1436
+ of application-specific information: assertions which state that one vertex is not
1437
+ (and will never be) reachable from another. We discuss how to incorporate such
1438
+ updates to further improve the optimized algorithm in practice to exploit such
1439
+ updates.
1440
+ First, we augment the definition of guided incremental digraph to allow up-
1441
+ dates of the form N(u, v), which labels that v is not reachable from u. We then
1442
+ say that the graph is valid if, in addition to the previous conditions, for every
1443
+ update N(u, v), v is not reachable from u via a sequence of edges in the final
1444
+ digraph. Figure 6 extends the example presented in Figures 1 and 2 with a not-
1445
+ reachable update. Notice that N(3, 5) does not affect the set of live and dead
1446
+ states, but may be exploited for more efficient search.
1447
+ To incorporate N(u, v) updates in our algorithm, when moving a reserve edge
1448
+ into the graph, N(u, v) can be used to shortcut the whole procedure: if we add
1449
+ an edge (v, u) in particular, then we know that this doesn’t create a cycle, and
1450
+ we don’t need to repeatedly call succz at all. We store N(u, v) updates in a set
1451
+ instead of a list for O(1) querying; if we need to merge them, we just discard the
1452
+ set if it gets too large. Therefore, in addition to what was maintained previously,
1453
+ the extended algorithm tracks, for each unknown canonical state x, a set of
1454
+ non-reachable edges (x, y), where each corresponds to some update N(u, v) with
1455
+ x = find(u) and y = find(v) (but not necessarily vice versa); and a constant κ
1456
+ which bounds the amount of work when merging non-reachable sets (additional
1465
+ Fig. 6. Extension to our framework: guided incremental digraph from Figures 1 and 2,
1466
+ with an added update N(3, 5) which states that 5 is not reachable from 3. In this graph,
1467
+ it would be invalid to then add E(3, 4) to the graph. This does not change the set of
1468
+ live or dead states, but may be exploited for more efficient search: when checking if
1469
+ state 3 is live or dead, we may ignore states 4 and 5.
1470
+ elements will be discarded). We modify the code for is-root(y, x) with a single
1471
+ check in the beginning of the procedure: if x is in the not-reachable set from y,
1472
+ return false. Otherwise, we continue as normal. Because this algorithm uses the
1473
+ N(u, v) information directly and conservatively, assuming the input GID is valid,
1474
+ it remains correct.
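+ As a rough illustration of this modification (the names and the bound KAPPA below are ours,
+ and is_root stands in for the optimized algorithm's procedure, which is not reproduced here):
+ KAPPA = 64   # illustrative bound on the size of merged not-reachable sets
+
+ def record_not_reachable(not_reachable, find, u, v):
+     """Remember that find(v) is not reachable from find(u), up to the bound."""
+     s = not_reachable.setdefault(find(u), set())
+     if len(s) < KAPPA:
+         s.add(find(v))
+
+ def is_root_with_shortcut(y, x, find, not_reachable, is_root):
+     # Single extra check: if x is recorded as not reachable from y, answer no
+     # immediately and skip the successor-following work entirely.
+     if find(x) in not_reachable.get(find(y), set()):
+         return False
+     return is_root(y, x)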
1475
+ A.2
1476
+ Reachability Problems on EREs
1477
+ For extended regexes X and Y , we say that Y is reachable from X if Y ∈ ∂⋆(X).
1478
+ We say that X and Y are strongly connected, denoted X ⟳ Y , when both
1479
+ Y ∈ ∂⋆(X) and X ∈ ∂⋆(Y ), i.e., when both Y is reachable from X and X is
1480
+ reachable from Y through derivation as defined above.
1481
+ We also consider the following subclasses of extended regexes:
1482
+ – Regexes (REs) are EREs that do not contain complement or intersection,
1483
+ that is, classical regexes.
1484
+ – Semi-extended regexes (SEREs) are EREs that do not contain complement.
1485
+ – Intersections of regexes (IREs) are SEREs that are equal to an intersection
1486
+ of REs.
1487
+ – Boolean combinations of regexes (BCREs) are EREs that are equal to a
1488
+ Boolean combination (combination of intersection and complement) of REs.
1489
+ Semantically, all of these capture the set of regular languages. The syntactic
1490
+ relationships between these different subclasses are summarized as follows, where
1491
+ all inclusions are strict:
1492
+ RE ⊂ IRE ⊂ BCRE
1493
+
1494
+
1495
+ SERE ⊂ ERE
1496
+ We consider the following three decision problems. They are closely related
1497
+ and in order of generality: the first is a special case of both the second and
1498
+ third, and the second reduces to two queries of the third. For each problem,
1499
+ we also consider problems on all the subclasses of EREs that we defined: REs,
1500
+ SEREs, IREs, and BCREs. We summarize the complexity results in the table in
1501
+ Figure 7.
1505
+ Subclass   Edge in Cycle    Strong Connectedness   Reachability
+ RE         Linear           Linear                 Linear
+ IRE        PSPACE-C         PSPACE-C               PSPACE-C
+ BCRE       PSPACE-C         PSPACE-C               PSPACE-C
+ SERE       PSPACE-C         PSPACE-C               PSPACE-C
+ ERE        non-elementary   non-elementary         non-elementary
+ Fig. 7. Complexity results for reachability of various subclasses of extended regexes.
1530
+ Edge in Cycle: Given X and Y such that X ∈ ∂(Y ), is Y ∈ ∂⋆(X)?
1531
+ Strong Connectedness: Given X and Y , is X ⟳ Y ?
1532
+ Reachability: Given X and Y , is Y ∈ ∂⋆(X)?
1533
+ Theorem 5. For RE, the edge-in-cycle, strong-connectedness, and reachability
1534
+ problems can be solved in linear time.
1535
+ Proof. It suffices to show that reachability can be solved in linear time, since as
1536
+ stated above, it is the most general of the three. We give a decision procedure for
1537
+ deciding Y ∈ ∂⋆(X) by recursing on the structure of X. Prior to the procedure,
1538
+ we ensure (i) that all regexes are normalized, (ii) that equal regexes are repre-
1539
+ sented by the same pointer, and (iii) that concatenations are represented as lists
1540
+ and precompute the length of each list, so that for any subexpression X0 of X,
1541
+ whether Y = Y1·X0 can be checked in constant time. This precomputation takes
1542
+ O(|X| + |Y |) time. The recursion then works as follows, on input (X, Y ). (1) If
1543
+ X = ϕ, ε, or ⊥, we can check directly if Y is one of the finitely many derivatives.
1544
+ (2) If X = X1 | X2, we observe that Y ∈ ∂⋆(X) if and only if Y ∈ ∂⋆(X1) or
1545
+ Y ∈ ∂⋆(X2). So we recurse on (X1, Y ) and (X2, Y ). (3) If X = X1*, we observe
+ that Y ∈ ∂⋆(X) if and only if Y = X1′(X1*) for X1′ ∈ ∂⋆(X1). So we first check
+ if Y has the form Y1X1*, then recurse on (X1, Y1). (4) Finally, if X = X1 · X2,
+ we observe that Y ∈ ∂⋆(X) if and only if either Y = X1′ · X2 for X1′ ∈ ∂⋆(X1),
+ or Y = X2′ for X2′ ∈ ∂⋆(X2). (Note that this relies on the fact that X1 is an
1559
+ RE, not an ERE, so we know it is nonempty and in particular ε ∈ ∂⋆(X1).) So
1560
+ we first check if Y has the form Y1 · X2, if so recursing on both (X1, Y1) and
1561
+ (X2, Y ), otherwise recursing on just (X2, Y ). Since the procedure never recurses
1562
+ twice on X or twice on the same subexpression of X, it takes O(|X|) time.
1563
+ ⊓⊔
1564
+ Theorem 6. For the classes IRE, BCRE, and SERE, the edge-in-cycle, strong-
1565
+ connectedness, and reachability problems are PSPACE-complete.
1566
+ Proof. It suffices to show: (1) the edge-in-cycle problem for IRE is PSPACE-
1567
+ hard; (2) the reachability problem for BCRE is in PSPACE; and (3) the reach-
1568
+ ability problem for SERE is in PSPACE. (1) We reduce from intersection-
1569
+ nonemptiness of REs, which is known to be PSPACE-hard. Given a list of
1570
+ REs R1, R2, . . . , Rk, then let b be a fresh character that does not appear in
1574
+ R1, . . . , Rk, and construct the IRE X = (bR1)∗ & (bR2)∗ & · · · & (bRk)∗. X has
1575
+ only one derivative X′, which is an intersection where the ith term is Ri(bRi)∗.
1576
+ Then the edge-in-cycle property for (X, X′) holds if and only if the intersection
1577
+ of Ri is nonempty, because ∂b is empty for all subexpressions of Ri except ε,
1578
+ so the only way to get back to X is by stepping all Ri to ε simultaneously. (2)
1579
+ We first convert the BCRE X to an alternating automaton (AFA): first convert-
1580
+ ing the RE subexpressions to NFAs, then using the standard constructions for
1581
+ Boolean operations on AFAs. (3) For an SERE X, for any X′ ∈ ∂⋆(X) we show
1582
+ that the size of X′ is bounded linearly in the size of the X. This can be seen
1583
+ by induction on the structure of X; for example, elements of ∂⋆(X1 & X2) are a
1584
+ pair of an element of ∂⋆(X1) and an element of ∂⋆(X2). It follows that we can
1585
+ provide the following NPSPACE algorithm for reachability of (X, Y ): at each
1586
+ step, pick a derivative nondeterministically of X and recurse.
1587
+ ⊓⊔
1588
+ Theorem 7. For ERE, the edge-in-cycle, strong-connectedness, and reachability
1589
+ problems are non-elementary.
1590
+ Proof. It suffices to show that the edge-in-cycle problem is non-elementary. We
1591
+ reduce from the nonemptiness problem for EREs, which is known to be non-
1592
+ elementary [51]. Given an ERE R, we let b be a fresh character not appearing
1593
+ in R, and we let X = (bR)∗. Then X has one derivative, X′ = R(bR)∗. The
1594
+ edge-in-cycle property for (X, X′) holds if and only if R is nonempty, because R
1595
+ is nonempty exactly when ε ∈ ∂⋆(R).
1596
+ ⊓⊔
1597
+
-NE4T4oBgHgl3EQf3w1M/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
-dE1T4oBgHgl3EQfUgPr/content/tmp_files/2301.03092v1.pdf.txt ADDED
@@ -0,0 +1,1059 @@
1
+ 1
2
+ Deep Injective Prior for Inverse Scattering
3
+ AmirEhsan Khorashadizadeh, Sepehr Eskandari, Vahid Khorashadi-Zadeh
4
+ and Ivan Dokmanić
5
+ Abstract—In electromagnetic inverse scattering, we aim
6
+ to reconstruct object permittivity from scattered waves. Deep
7
+ learning is a promising alternative to traditional iterative solvers,
8
+ but it has been used mostly in a supervised framework to regress
9
+ the permittivity patterns from scattered fields or back-projections.
10
+ While such methods are fast at test-time and achieve good results
11
+ for specific data distributions, they are sensitive to the distribution
12
+ drift of the scattered fields, common in practice. If the distribu-
13
+ tion of the scattered fields changes due to changes in frequency,
14
+ the number of transmitters and receivers, or any other real-world
15
+ factor, an end-to-end neural network must be re-trained or fine-
16
+ tuned on a new dataset. In this paper, we propose a new data-
17
+ driven framework for inverse scattering based on deep generative
18
+ models. We model the target permittivities by a low-dimensional
19
+ manifold which acts as a regularizer and is learned from data.
20
+ Unlike supervised methods which require both scattered fields
21
+ and target signals, we only need the target permittivities for
22
+ training; it can then be used with any experimental setup. We
23
+ show that the proposed framework significantly outperforms the
24
+ traditional iterative methods especially for strong scatterers while
25
+ having comparable reconstruction quality to state-of-the-art deep
26
+ learning methods like U-Net.
27
+ I. INTRODUCTION
28
+ ELECTROMAGNETIC inverse scattering is the problem of
30
+ determining the electromagnetic properties of unknown
31
+ objects from how they scatter incident fields. As it is non-
32
+ destructive, it has a variety of applications in different areas,
33
+ such as early detection of breast cancer [1], mineral prospect-
34
+ ing [2], detecting defects and cracks inside objects [3], imaging
35
+ through the wall [4] and remote sensing [5].
36
+ We consider the reconstruction of the finite number of
37
+ parameters of the object from the scattered fields. While inverse
38
+ scattering is well-posed and Lipschitz stable in theory, when
39
+ full-aperture continuous measurements are available [6], it is
40
+ Manuscript received December 29, 2022.
41
+ AmirEhsan Khorashadizadeh and Ivan Dokmani´c were supported by the
42
+ European Research Council Starting Grant 852821–SWING.
43
+ AmirEhsan Khorashadizadeh is with the Department of Mathematics and
44
+ Computer Science of the University of Basel, 4001 Basel, Switzerland (e-mail:
45
46
+ Sepehr Eskandari is with Microwave Systems, Sensors, and Imaging Lab
47
+ (MiXIL), University of Southern California, Los Angeles, CA 90089 USA
48
+ (e-mail: [email protected]).
49
+ Vahid Khorashadi-Zadeh is with School of Electric and Computer
50
+ Engineering, University of Tehran, Tehran 14399-57131, Iran (e-mail:
51
52
+ Ivan Dokmani´c is with the Department of Mathematics and Computer
53
+ Science of the University of Basel, 4001 Basel, Switzerland, and also
54
+ with the Department of Electrical, Computer Engineering, the Univer-
55
+ sity of Illinois at Urbana-Champaign, Urbana, IL 61801 USA (e-mail:
56
57
+ Our implementation is available at https://github.com/swing-research/scattering injective prior.
+ a severely ill-posed inverse problem for a finite number of
65
+ measurements. This means that a small perturbation in the
66
+ scattered fields may lead to a large error in the reconstructed
67
+ permittivity pattern [7]. Moreover, the forward operator from
68
+ the permittivity to the scattered fields is nonlinear, which
69
+ further complicates the inversion. The nonlinearity is due to the
70
+ multiple scattering; the problem becomes more nonlinear as the
71
+ permittivity contrast increases [7]. All these together make the
72
+ inverse scattering challenging, especially for strong scatterers
73
+ (objects with large permittivity) and noisy measurements, which
74
+ require an effective regularization to restrict the search space
75
+ and enable accurate reconstruction.
76
+ A number of optimization-based methods are proposed to
77
+ address nonlinearity and ill-posedness of the inverse scattering
78
+ including Born iterative [8], distorted Born iterative method
79
+ (DBIM) [9], contrast source inversion (CSI) [10], and subspace-
80
+ based optimization (SOM) [11]. Although these methods have
81
+ been shown to be effective for objects with small permittivity,
82
+ they do not give an accurate reconstruction for large permittivity.
83
+ All the above methods iteratively optimize a regularized
84
+ objective, with a hand-crafted regularization term.
85
+ Recently, thanks to the significant increase in computing
86
+ power and the availability of sufficient data, data-driven
87
+ approaches to inverse scattering have started receiving attention.
88
+ Most deep learning models for inverse scattering use a super-
89
+ vised learning approach, which takes the scattered fields or a
90
+ simple back-projection as input and trains a deep neural network
91
+ to produce the target permittivity pattern. The authors of [12]–
92
+ [14] used scattered fields as input. While this approach provides
93
+ good reconstructions even for objects with strong scatterers [14],
94
+ it is sensitive to the experiment configuration such as the
95
+ frequency or the number of incident waves and receivers; if
96
+ the distribution of the scattered fields in test-time slightly
97
+ changes, the quality of reconstructions remarkably degrades;
98
+ the model requires new training data which is too costly.
99
+ One strategy to tackle this issue is to use back-projections
100
+ as input instead of the raw scattered fields, thus enabling
101
+ the use of convolutional neural networks [15]–[17]. While
102
+ this approach is shown to be effective for objects with small
103
+ and moderate permittivity, the quality of the back-projections
104
+ significantly drops in large permittivity (Figure 3), which leads
105
+ to a drop in the reconstruction quality [14]. On the other hand,
106
+ supervised learning methods are also potentially vulnerable
107
+ to adversarial attacks [18], which is problematic in medical
108
+ applications [19]. Moreover, incorporating the well-known
109
+ physics of the scattering problem (forward operator), which
110
+ can strikingly improve the accuracy of the reconstructions, is
111
+ not easy in supervised learning models [20].
112
+ arXiv:2301.03092v1 [cs.LG] 8 Jan 2023
+ To tackle these issues, we propose a deep learning
116
+ approach to inverse scattering using injective generative models.
117
+ The proposed method benefits from an unsupervised learning
118
+ framework—the training phase uses only the target permittivity
119
+ patterns and the physics of scattering is fully incorporated into
120
+ the solution. Deep generative models are a class of unsupervised
121
+ learning methods that model the probability distribution of
122
+ data by using deep neural networks. Latent-variable generative
123
+ models such as generative adversarial networks (GAN) [21],
124
+ [22], variational autoencoders [23], normalizing flows [24]–
125
+ [26] and diffusion models [27] train a deep neural network to
126
+ transform the samples of a simple (Gaussian) distribution to
127
+ those of the target data distribution. We expect a trained generator to
128
+ produce plausible samples similar to the training data when
129
+ taking samples of a Gaussian distribution as input.
130
+ Bora et al. [28] used GANs to constrain the solution
131
+ domain to the range of the trained generator for solving
132
+ compressed sensing inverse problems. The strength of this
133
+ idea is that it requires only the target signals for training and
134
+ does not know anything about the inverse problem that is going to
135
+ be solved. As soon as the generator is trained, it can be used
136
+ to solve any inverse problem with a known forward operator.
137
+ This property makes the model robust to the distribution shift
138
+ of the measurements and adversarial attack.
139
+ Several methods have tried to improve the quality of
140
+ reconstructions; Kelkar et al. [29] used the popular style-
141
+ GAN generator [30] while Hussein et al. [31] jointly optimize
142
+ the generator weights with latent codes to further reduce
143
+ the reconstructions error. However, GANs are known to be
144
+ unstable in training, and the optimization process over latent
145
+ space can get stuck in local minima, which requires many
146
+ restarts [28]. Recently, normalizing flows as a generator instead
147
+ of GANs have been shown to have a stable optimization
148
+ landscape [32], [33]. Nevertheless, normalizing flows have
149
+ their own drawbacks; having a latent code with the same
150
+ dimension as data space makes them too expensive in training
151
+ and does not provide an appropriate constraint in the generator
152
+ output, leading to poor reconstruction for ill-posed inverse
153
+ problems. More recently, injective normalizing flows [34], [35]
154
+ are proposed to alleviate these issues. Injective flows are a
155
+ class of deep generative models that are well-suited for solving
156
+ ill-posed inverse problems. These models unlike GANs give us
157
+ access to the approximate likelihood of the generated samples,
158
+ which provides a likelihood-based regularization. Moreover,
159
+ injective flows unlike regular normalizing flows benefit from
160
+ a low-dimensional latent space which provides an additional
161
+ effective regularizer for ill-posed inverse problems. Injective
162
+ flows have been shown in [35] to effectively solve linear inverse
163
+ problems.
164
+ In this paper, we use injective flows as the generator
165
+ to solve full-wave inverse scattering. We will show that the
166
+ proposed method effectively solves inverse scattering even for
167
+ objects with large permittivity for which traditional methods
168
+ fail. Moreover, we use a data-driven initial guess for starting
169
+ the iterative optimization, which significantly outperforms the
170
+ simple back-projection initialization used in the traditional
171
+ Fig. 1: The setup for the inverse scattering problem, red
172
+ arrows show the incident plane waves; the green circles are
173
+ the receivers.
174
+ methods. Finally, we show that the proposed framework yields
175
+ reconstructions of comparable or better quality to highly
176
+ successful supervised methods like the U-Net [36].
177
+ II. FORWARD AND INVERSE SCATTERING
178
+ We begin our discussion with equations governing the
179
+ 2D forward and inverse scattering problem. We consider
180
+ two-dimensional transverse magnetic scattering, where the
181
+ longitudinal direction is along the z direction (TMz). As shown
182
+ in Figure 1, non-magnetic scatterers with permittivity ϵr in
183
+ a vacuum background with permittivity ϵ0 and permeability
184
+ µ0 are located in investigation domain Dinv which is a D×D
185
+ square, and are illuminated by Ni plane waves with equispaced
186
+ directions. We have Nr receivers placed uniformly on a circle S
187
+ with radius R which measure the scattered fields. The forward
188
+ scattering problem can be derived from the time-harmonic form
189
+ of Maxwell’s equations and stated as [37],
190
+ ∇ × (∇ × Et(r)) − k0² ϵr(r) Et(r) = i ω µ0 J(r),    (1)
193
+ where Et is the total electric field, k0 = ω√µ0ϵ0 is the
194
+ wavenumber of the homogeneous background and J is the
195
+ contrast current density and can be calculated using equivalence
196
+ theorem [38] as J(r) = χ(r)Et(r) where χ(r) = ϵr(r)−1 and
197
+ is called the contrast. The time-dependence factor exp(iωt)
198
+ with angular working frequency ω is assumed and will be
199
+ suppressed throughout this paper. By using Dyadic Green’s
200
+ function [39], Equation (1) can be formalized by two coupled
201
+ integral equations. The first one, called Lippmann–Schwinger
202
+ equation, relates total electric fields Et in an unknown domain
203
+ to contrast current density J,
204
+ Et(r) = Ei(r) + k0² ∫_Dinv g(r, r′) J(r′) dr′,    (2)
210
212
+ where r ∈ Dinv, and
213
+ g(r, r′) = (1/(4i)) H0⁽²⁾(k0 |r − r′|),
+ where H0⁽²⁾ is the Hankel function of the second kind and g denotes the
+ 2D free-space Green's function. The second equation, referred
220
+ to as the data equation, maps the contrast current density J
221
+ to the scattered electric fields Es at the receivers locations,
222
+ Es(r) = k0² ∫_Dinv g(r, r′) J(r′) dr′,  r ∈ S.    (3)
228
+ We discretize the investigation domain Dinv with N × N units
229
+ and rewrite Equations (2) and (3) as
230
+ Et = Ei + GdχEt
231
+ (4)
232
+ Es = GsχEt + δ
233
+ (5)
234
+ where Gd ∈ R^(N²×N²) and Gs ∈ R^(Nr×N²) have analytical
235
+ closed form [7]. Moreover, Et, Ei and Es respectively
236
+ correspond to total, incident and scattered electric fields
237
+ and χ is a diagonal matrix with the diagonal elements
238
+ χ(n, n) = ϵr(rn) − 1. We also consider additive noise δ to the
239
+ measurements.
240
+ We merge Equations (4) and (5) to make a single
241
+ expression for the forward equation [7],
242
+ Es = Gs χ (I − Gd χ)⁻¹ Ei + δ,
243
+ (6)
244
+ which is a nonlinear mapping χ ↦ Es. It is convenient to
245
+ define a forward operator A mapping χ to Es,
246
+ y = A(x) + δ,
247
+ (7)
248
+ where A(·) is the nonlinear forward scattering operator
249
+ A(χ) = Gs χ (I − Gd χ)⁻¹ Ei,
250
+ (8)
251
+ y = Es and x = χ. The task of inverse scattering is to
252
+ reconstruct the contrast signal χ from the scattered fields Es
253
+ where we assume Gd, Gs, incident electric waves Ei and
254
+ consequently the forward operator A(·) are known. In the next
255
+ section, we briefly review deep generative models as the prior
256
+ model for inverse problems.
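+ To make the discretized forward model concrete, the following is a minimal
+ NumPy sketch of Equations (4)-(6); it is an illustration only, and Gd, Gs, Ei
+ and the contrast chi are assumed to be precomputed arrays with the shapes
+ defined above (the names are ours, not from any released code).
+ import numpy as np
+ def forward_operator(chi, Gd, Gs, Ei):
+     # A(chi) = Gs X (I - Gd X)^{-1} Ei with X = diag(chi), cf. Eq. (8)
+     X = np.diag(chi.ravel().astype(complex))
+     Et = np.linalg.solve(np.eye(X.shape[0]) - Gd @ X, Ei)   # total fields, Eq. (4)
+     return Gs @ (X @ Et)                                    # scattered fields, Eq. (5)
+ # Noisy measurements of Eq. (7) would then be y = forward_operator(chi, Gd, Gs, Ei) + delta.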
257
+ III. DEEP GENERATIVE MODELS
258
+ One major division in machine learning is generative and
259
+ discriminative models. In discriminative models, one aims
260
+ to learn a direct mapping from the measurements to the
261
+ target signals. Discriminative approaches have been extensively
262
+ used in solving inverse problems, specifically U-Net [36] has
263
+ shown great success in many applications such as computed
264
+ tomography (CT) [40], magnetic resonance imaging (MRI) [41],
265
+ optoacoustic tomography [42] and electromagnetic inverse
266
+ scattering [16]. The key idea of this success might be ascribed to
267
+ having a multiscale architecture [43]. Recently, the variational
268
+ version of U-Net has shown excellent performance for posterior
269
+ sampling and uncertainty quantification of inverse scattering
270
+ problem [44].
271
+ On the other hand, the task of generative models is to learn
272
+ the probability distribution of data. Latent-variable generative
273
+ models including generative adversarial networks (GANs) [21],
274
+ [22], variational autoencoders [23] and normalizing flows [24]–
275
+ [26] are a subcategory of deep generative models (DGMs)
276
+ which train a deep neural network, called the generator, to
277
+ transform the samples of a simple and known distribution
278
+ (often Gaussian) to data distribution samples. DGMs have
279
+ many applications such as image generation [22], image-to-
280
+ image translation [45], density estimation [46] and variational
281
+ inference [47], [48]. Recently, DGMs have been used as a prior
282
+ for solving inverse problems [28], [35], [49]–[52]; consider a
283
+ DGM trained on a training set of target signals (the solutions
284
+ of a given inverse problem), one can search in the latent space
285
+ of the trained generator for the latent code yielding a solution
286
+ that aligns with the given measurements. The pre-trained generator
287
+ plays the role of an effective regularizer for generating plausible
288
+ target signals. As discussed earlier, the key advantage of this
289
+ approach is that the measurements are not used in the training
290
+ phase. As a result, once the DGM is trained, it can be used
291
+ for solving any inverse problems.
292
+ However, the choice of DGM is of paramount importance
293
+ to provide stable training, high-quality sample generation,
294
+ and an effective regularizer for solving ill-posed inverse
295
+ problems. While modern GANs with the various innovations
296
+ in architectures and training protocols exhibit high-quality
297
+ samples, they suffer from training instability [53], [54] and have
298
+ shown poor reconstructions when used as a prior for solving ill-
299
+ posed inverse problems [35], [55]. Normalizing flows alleviate
300
+ many drawbacks of GAN; they are stable in training and can
301
+ produce high-quality samples. Normalizing flows comprise a
302
+ set of bijective transformations which have tractable inverse
303
+ and log det Jacobian computations. They give access to the
304
+ likelihood of the generated samples and can be trained based
305
+ on maximum likelihood. Normalizing flows as a prior were
306
+ shown to be more effective than GANs for solving inverse
307
+ problems [32], [33]. However unlike GANs, normalizing flows
308
+ are bijective mappings, having the same dimension in the
309
+ latent space and data space, consequently, the network output
310
+ is not constrained, leading to poor regularization for solving
311
+ ill-posed inverse problems [14].
312
+ More recently, the authors of [35] showed that injective
313
+ normalizing flows are highly effective for solving ill-posed inverse
314
+ problems. Injective normalizing flows are an extension of
315
+ normalizing flows which have a low-dimensional latent space.
316
+ Injective flows provide a low-dimensional representation of the
317
+ data (like GANs) which acts as a strong regularizer for
318
+ solving inverse problems while giving access to the likelihood
319
+ of the generated samples (like normalizing flows) which can
320
+ be seen as the second regularization. In the next section, we
321
+ briefly review injective flows called Trumpets.
322
+ IV. INJECTIVE NORMALIZING FLOWS
323
+ Injective normalizing flows [35] map a low-dimensional
324
+ latent space to the high-dimensional data space using a set of
325
335
+ Fig. 2: Injective normalizing flows [35] comprise two submodules, a low-dimensional bijective flow hη and an injective network
336
+ with expansive layers gγ. The MOG initialization is the mean of the Gaussian in the latent space zinit = µz (red circle).
337
+ invertible neural networks. Injective normalizing flows have
338
+ a fast inverse on the range and give access to the likelihood
339
+ of the generated samples. As shown in Figure 2, fθ(z) =
340
+ gγ(hη(z)) with weights θ = (γ, η) comprises two subnetworks:
341
+ an injective part (with expansive layers) gγ that maps a low-
342
+ dimensional space Rd to high-dimensional data space RD
343
+ where d ≪ D, and a bijective mapping hη that keeps the
344
+ dimension in low dimensional space Rd. The training of the
345
+ injective normalizing flows consists of two phases: first, the
346
+ range of the injective generator must be adjusted to lie on the
347
+ training data by optimizing over the weights of the injective
348
+ subnetwork gγ; once the training data are close to
349
+ the range of the generator, in the second phase the likelihood
350
+ of the pre-image of the training data should be maximized
351
+ over the weights of the bijective subnetwork hη. Further details
352
+ are explained in [35]. When the network is trained, we can
353
+ generate random samples similar to the training data
354
+ xgen = f(zgen)
355
+ (9)
356
+ where zgen ∼ N(µz, σz² I). Further information about the
+ universality of density and manifold approximation of injective
+ normalizing flows is studied in [56].
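+ As a small illustration of Eq. (9) (our own sketch; f, mu_z and sigma_z stand
+ for the trained generator and its latent Gaussian parameters):
+ import numpy as np
+ def sample_contrasts(f, mu_z, sigma_z, n_samples=16):
+     # draw z_gen ~ N(mu_z, sigma_z^2 I) and push it through the trained injective flow
+     z_gen = mu_z + sigma_z * np.random.randn(n_samples, mu_z.shape[-1])
+     return f(z_gen)   # x_gen = f(z_gen)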
360
+ Due to having a low-dimensional latent space, injec-
361
+ tive flows provide a low-dimensional manifold in the high-
362
+ dimensional data space. When they are trained, the manifold
363
+ would contain plausible samples which can be used as an
364
+ effective regularizer for solving ill-posed inverse problems.
365
+ The injective part provides a projection operator gγ(g†γ(x))
+ which maps the data samples x to the intermediate space by
+ z′ = g†γ(x) and projects them back to the data space by gγ(z′).
371
+ The authors of [35] used the projection operator to project a
372
+ sample on the manifold to suppress the noise and artifacts,
373
+ as the learned manifold contains high-quality samples. In the
374
+ next section, we will present our method of solving inverse
375
+ scattering problems using injective normalizing flows.
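+ The projection described above can be written as a two-line helper (a sketch;
+ g and g_dagger denote the injective subnetwork and its inverse on the range):
+ def project_onto_manifold(x, g, g_dagger):
+     # g(g†(x)): map x to the intermediate space, then back onto the learned manifold
+     z_prime = g_dagger(x)
+     return g(z_prime)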
376
+ V. INJECTIVE FLOWS FOR INVERSE SCATTERING
377
+ Inverse scattering is a severely ill-posed inverse problem,
378
+ which means a small perturbation in the scattered fields leads
379
+ to an exponentially large error in the contrast [7]. This makes
380
+ the inversion unstable even for a small amount of noise. As
381
+ discussed in section II, inverse scattering is a nonlinear inverse
382
+ problem whose nonlinearity heavily relies on the maximum
383
+ contrast value. For objects with large contrasts, the problem
384
+ becomes highly nonlinear, which makes the inversion extremely
385
+ difficult. In such a scenario, the significance of having a strong
386
+ regularizer to restrict the search domain is crucially important.
387
+ We consider the contrast signal χ = x ∈ X and
388
+ the scattered fields Es = y ∈ Y, described by forward
389
+ operator (7), as random vectors. While our framework admits
390
+ other distributions beyond Gaussian for the additive noise δ in
391
+ (7), in this paper we assume that δ is a random vector with
392
+ Gaussian distribution δ ∼ N(0, σ2I) yielding
393
+ Y |X ∼ N(A(X), σ2I).
394
+ (10)
395
+ One effective approach for solving ill-posed inverse problems is
396
+ computing MAP estimate, where we look for the solution x that
397
+ has the highest posterior likelihood for a given measurement
398
+ y,
399
+ xMAP = arg maxx log(pX|Y (x|y)),
400
+ (11)
401
403
+ where pX|Y (x|y) is the posterior distribution for the given
404
+ measurement y. Using Bayes theorem yields,
405
+ xMAP = arg min_x − log(pX|Y(x|y))
+      = arg min_x − log( pY|X(y|x) pX(x) / pY(y) )
+      = arg min_x − log(pY|X(y|x)) − log(pX(x)).    (12)
415
+ The first term can be obtained from (10),
416
+ xMAP = arg min_x (1/2) ∥y − A(x)∥₂² − λ log(pX(x)),    (13)
421
+ where the first term is the data-consistency loss while
422
+ log(pX(x)) is the prior distribution of the contrast and plays
423
+ the role of the regularization. We also have λ which is a
424
+ hyperparameter to adjust the amount of regularization term
425
+ (although it comes from the noise standard deviation which
426
+ we assume is unknown). In principle, the prior distribution
427
+ pX(x) is not available and must be estimated. For example,
428
+ traditionally pX(x) is approximated with a Gaussian distribu-
429
+ tion with zero mean leading to the Tikhonov regularization.
430
+ However, a Gaussian distribution is often too far from the true
431
+ prior distribution and leads to poor reconstructions.
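+ As a short illustration (ours, not from the original text): if the prior is the
+ zero-mean Gaussian p_X(x) ∝ exp(−∥x∥₂² / (2γ²)), then (13) reduces to
+ \hat{x}_{MAP} = \arg\min_x \tfrac{1}{2}\|y - \mathcal{A}(x)\|_2^2 + \frac{\lambda}{2\gamma^2}\|x\|_2^2,
+ which is exactly the Tikhonov-regularized least-squares problem mentioned above.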
432
+ In this paper, we study a new regularization family for
433
+ inverse scattering based on deep generative models. We consider
434
+ that a training set of contrast signals {x(i)}, i = 1, . . . , N, is available, and we
+ train a deep generative model x = f(z) to produce high-quality
+ contrast samples. We expect the trained generator f to produce
438
+ high-quality contrast samples when it takes random samples
439
+ from the Gaussian distribution in the latent space z ∈ Z.
440
+ As discussed in section III, this property of deep generative
441
+ models makes them an effective regularizer for solving inverse
442
+ problems [28].
443
+ We use injective normalizing flows as the generator of the
444
+ contrast signal since they are well-suited for solving ill-posed
445
+ inverse problems [35]. We perform an optimization in the latent
446
+ space to find the latent code that aligns with scattered fields y,
447
+ zMAP = arg min_z (1/2) ∥y − A(f(z))∥₂² − λ log(pX(f(z))),    (14)
+ where an approximation to pX is also provided by the injective
453
+ flows and acts as the second regularizer. The reconstructed
454
+ contrast signal can be obtained as xMAP = f(zMAP). We
455
+ call this method latent space optimization (LSO). It is worth
456
+ mentioning that (14) has been previously proposed by [32],
457
+ [33] for solving compressed sensing inverse problems using
458
+ regular normalizing flows.
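+ For concreteness, a minimal TensorFlow sketch of the latent space optimization
+ in (14); it is our own illustration under the stated setup, and forward_op, f,
+ log_prob and mu_z are assumed to be a differentiable forward operator, the
+ trained injective generator, its likelihood estimate and the MOG initializer.
+ import tensorflow as tf
+ def lso_reconstruct(y, forward_op, f, log_prob, mu_z, lam=0.0, lr=0.05, n_iters=300):
+     z = tf.Variable(tf.identity(mu_z))                     # MOG initialization z_init = mu_z
+     opt = tf.keras.optimizers.Adam(learning_rate=lr)
+     for _ in range(n_iters):
+         with tf.GradientTape() as tape:
+             residual = y - forward_op(f(z))                # data-consistency term of Eq. (14)
+             loss = 0.5 * tf.reduce_sum(tf.abs(residual) ** 2)
+             if lam > 0.0:
+                 loss -= lam * log_prob(f(z))               # likelihood regularizer
+         grads = tape.gradient(loss, [z])
+         opt.apply_gradients(zip(grads, [z]))
+     return f(z)                                            # x_MAP = f(z_MAP)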
459
+ Unlike the supervised learning methods for inverse scatter-
460
+ ing [13], [15]–[17] which use a paired training set of contrast
461
+ and scattered fields {(x(i), y(i))}, i = 1, . . . , N, our framework benefits
463
+ from an unsupervised learning paradigm where scattered fields
464
+ are not used in the training of injective flows. This means that if the
+ distribution of the scattered fields changes (due to a change
+ in the experimental configuration), the generative network, unlike
+ the supervised methods, does not need to be retrained; as soon
468
+ as the injective generator is trained over the contrast signals, we
469
+ can optimize (14) for each new set of scattered fields to reconstruct
470
+ Panels: Ground truth; BP (ϵr = 1.2, 2, 4); BA (ϵr = 1.2, 2, 4).
477
+ Fig. 3: Performance analysis of back-propagation (BP) and
478
+ Born approximation (BA) methods for different ϵr; while BA
479
+ and BP reconstructions are visually meaningful for small ϵr,
480
+ their performance drops sharply for objects with large ϵr.
481
+ Random ellipses
482
+ MNIST
483
+ Fig. 4: Illustration of the MOG initializer in the data space
484
+ f(µz) for random ellipses and MNIST datasets
485
+ the corresponding contrast. Apart from the advantages of an
486
+ unsupervised learning paradigm, the proposed method fully
487
+ exploits the underlying physics of the scattering problem by
488
+ optimizing over the complex-valued scattered fields in (14).
489
+ Kothari et al. [57] have shown that incorporating wave physics
490
+ in the neural network architecture can significantly improve
491
+ the quality of reconstructions, especially for out-of-distribution
492
+ data.
493
+ We solve the optimization problem (14) by Adam op-
494
+ timizer [58] and compute the gradients with respect to z
495
+ by automatic differentiation provided by TensorFlow [59] in
496
+ Python. We also use an alternative method for Equation (14)
497
+ proposed by [35] which performs the optimization in the data
498
+ space,
499
+ xMAP = arg min_x (1/2) ∥y − A(g(g†(x)))∥₂² − λ log(pX(x))    (15)
504
+ where g(g†(x)) is the projection operator explained in sec-
505
+ tion IV. We call this method data space optimization (DSO).
506
+ This method is shown in [35] to be effective for solving
507
+ linear inverse problems. While in each of the LSO iterations
508
+ the reconstructed point x = f(z) is always on the learned
509
+ manifold, this is not the case for the DSO method, where the
510
+ reconstructed image might be off the manifold. Moreover, the
511
+ DSO method requires more computations than LSO as we need
512
+ to take derivatives over the reverse direction of the injective
513
+ subnetwork g†(x).
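+ The corresponding data space objective of (15) differs only in the optimization
+ variable and the projection step (a sketch reusing the assumed helpers above):
+ import tensorflow as tf
+ def dso_loss(x, y, forward_op, g, g_dagger, log_prob, lam):
+     # project x onto the learned manifold with g(g†(x)) before applying the forward model
+     residual = y - forward_op(g(g_dagger(x)))
+     return 0.5 * tf.reduce_sum(tf.abs(residual) ** 2) - lam * log_prob(x)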
514
+ The choice of initial guess is crucially important in inverse
515
528
+ Ground truths
529
+ Projections on the manifold
530
+ Generated samples
531
+ Fig. 5: Performance evaluation of the trained injective normalizing flows for random ellipses dataset; ground truth contrasts,
532
+ their projections on the learned manifold and some randomly generated samples.
533
+ scattering solvers. While a poor initialization misleads the
534
+ optimization process, a good initial step helps the algorithm to
535
+ converge more accurately and faster. The authors of [9] used
536
+ Born approximation as the initialization for the distorted Born
537
+ iterative method (DBIM). A back-propagation (BP) solution
538
+ was also used in [10], [60] as an initial guess of the contrast
539
+ source inversion (CSI) method. Figure 3 shows the ground
540
+ truth, back-propagation (BP) and Born approximations (BA)
541
+ for an object with different maximum ϵr values. While BP
542
+ and BA can present fuzzy reconstructions for objects with
543
+ small permittivity, their performance sharply drops for large ϵr
544
+ (especially numerically) which makes them a poor initialization
545
+ for strong scatterers.
546
+ In order to circumvent this issue, we use a data-driven
547
+ initialization suggested in [32]; mean of the Gaussian distri-
548
+ bution (MOG) in the latent space as shown in Figure 2. The
549
+ MOG initializer zinit = µz provides a fixed initialization with
550
+ respect to the measurements (scattered fields); thereby being
551
+ independent of the maximum contrast value and the problem
552
+ configuration. This property helps (14) and (15) to converge
553
+ better even for a large ϵr. In section VI, we will show that
554
+ MOG initialization significantly improves the quality of the
555
+ reconstructions compared to BP. To the best of our knowledge,
+ this is the first data-driven initializer proposed for
+ inverse scattering.
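+ In code, the two initializations compared in this paper amount to the following
+ (an illustrative helper; the BP branch is one plausible way to reuse a
+ back-propagation estimate, not necessarily the exact recipe used here):
+ import numpy as np
+ def initial_guess(mode, mu_z, g_dagger=None, x_bp=None):
+     if mode == "mog":
+         return np.array(mu_z)          # data-driven MOG initializer, z_init = mu_z
+     if mode == "bp":
+         return g_dagger(x_bp)          # pre-image of a back-propagation estimate
+     raise ValueError("mode must be 'mog' or 'bp'")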
558
+ VI. COMPUTATIONAL EXPERIMENTS
559
+ In the following, we will evaluate the performance of DSO
560
+ and LSO for inverse scattering. We consider the MOG and BP
561
+ initializations for DSO while using just MOG initialization for
562
+ LSO. We compare the performance of our proposed methods
563
+ with a traditional iterative method DBIM [9]. While our method
564
+ is based on an unsupervised-learning paradigm (scattered fields
+ are not used during training), we also compare its performance
+ with a successful supervised learning method, U-Net [36], which
+ has shown great success for inverse scattering [16], to show the
+ effectiveness of our method. U-Net takes the back-propagation
569
+ (BP) as input and returns the permittivity pattern of the object
570
+ in the output.
571
+ We use MNIST [61] with 60000 training samples in the
572
+ resolution N = 32. We also use a custom dataset of 60000
573
+ training samples with resolution N = 64 of overlapping ellipses
574
+ used in [14] to have a more challenging task for accurate
575
+ reconstructions.
576
+ We use Ni = 12 incident plane waves and Nr = 12
577
+ receivers, uniformly distributed on a circle with radius R =
578
+ 20 cm around the object with maximum permittivity ϵr and
579
+ dimension D = 20 cm. The working frequency is 3 GHz and
580
+ we added 30 dB noise to the scattered fields.
581
+ We used the injective normalizing flows with the same
582
+ architecture described in [35]. We trained the injective subnet-
583
+ work gγ for 150 epochs to ensure the training samples (contrast
584
+ signals) are close to the range of the generator. Figure 5 shows
585
+ some test samples and their projections on the learned manifold.
586
+ Then we trained the bijective subnetwork hη for 150 epochs
587
+ to maximize the likelihood of the pre-image of the training
588
+ samples in the intermediate space. Figure 5 illustrates some
589
+ randomly generated samples which confirms that the model
590
+ can produce plausible samples to be used as an effective prior
591
+ for solving inverse scattering.
592
+ We optimize (14) and (15) by using Adam optimizer with a
593
+ learning rate of 0.05 for 300 iterations. We use λ = 0.01 for BP
594
+ and λ = 0 for MOG initialization. It is worth mentioning that
595
+ when we use the MOG initializer, we start from high-likelihood
+ regions (mean of the Gaussian) which can be viewed as a
+ hidden regularizer; we thus use λ = 0 in this case. Figure 4
598
+ shows the MOG initialization for MNIST and random ellipses
599
+ datasets.
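+ The experimental and optimization settings stated above, gathered into one
+ place for reference (a convenience summary, not code from the paper):
+ config = {
+     "n_incident_waves": 12, "n_receivers": 12,
+     "circle_radius_cm": 20, "domain_size_cm": 20,
+     "frequency_GHz": 3.0, "measurement_noise_dB": 30,
+     "optimizer": "Adam", "learning_rate": 0.05, "n_iterations": 300,
+     "lambda": {"bp_init": 0.01, "mog_init": 0.0},
+ }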
600
+ Figures 6 and 7 show the performance of different methods
601
+ for inverse scattering for ϵr = 4 over five test samples from
602
+ random ellipses and MNIST datasets. While the traditional
603
+ iterative method (DBIM) fails completely in this
604
+ challenging task (ϵr = 4 and 30 dB noise), DSO and LSO
605
+ have strikingly more accurate reconstructions, which clearly
606
+ Fig. 6: Performance comparison of different methods over random ellipses dataset in resolution 64 × 64
+ (panels: BP, DBIM, U-Net, DSO (BP), DSO (MOG), LSO, ground truth).
+ Fig. 7: Performance comparison of different methods over MNIST dataset in resolution 32 × 32
+ (panels: BP, DBIM, U-Net, DSO (BP), DSO (MOG), LSO, ground truth).
752
+ TABLE I: Performance of different methods for solving inverse scattering (ϵr = 4) averaged over 25 test samples
+                   PSNR                 SSIM
+                   MNIST    Ellipses    MNIST    Ellipses
+ BP                7.75     7.00        0.01     0.01
+ DBIM [9]          5.77     4.67        0.01     0.01
+ U-Net [36]        24.26    21.94       0.90     0.82
+ DSO (BP)          8.73     7.89        0.16     0.16
+ DSO (MOG)         21.50    14.56       0.56     0.44
+ LSO (MOG)         25.22    20.50       0.89     0.85
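+ The PSNR/SSIM figures above can be reproduced with standard image metrics; a
+ sketch using scikit-image (the exact data range and normalization used in the
+ paper may differ):
+ import numpy as np
+ from skimage.metrics import peak_signal_noise_ratio, structural_similarity
+ def evaluate(reconstructions, ground_truths):
+     psnrs, ssims = [], []
+     for rec, gt in zip(reconstructions, ground_truths):
+         rng = gt.max() - gt.min()
+         psnrs.append(peak_signal_noise_ratio(gt, rec, data_range=rng))
+         ssims.append(structural_similarity(gt, rec, data_range=rng))
+     return float(np.mean(psnrs)), float(np.mean(ssims))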
790
+ Fig. 8: The performance of different methods for different ϵr
791
+ over MNIST dataset; SSIM is computed over the normalized
792
+ signals between −1 and 1.
793
+ shows the significance of a data-driven regularization. Moreover,
794
+ the MOG initialization, as we expected, exhibits remarkably
795
+ better reconstructions compared to BP for this high permittivity
796
+ (ϵr = 4). Furthermore, both figures show that LSO outperforms
797
+ DSO; running optimization in the latent space results in better
798
+ reconstructions as discussed in section V. Finally, while LSO
799
+ does not use scattered fields in the training phase, it produces
+ reconstructions of comparable (or even better) quality to
+ the highly successful supervised method U-Net.
802
+ Table I shows the numerical results in PSNR and SSIM
803
+ averaged over 25 test samples.
804
+ As discussed in section V, the maximum ϵr of the
805
+ object plays a significant role in the performance of inverse
806
+ scattering solvers; objects with large ϵr are more difficult to
807
+ reconstruct. Figure 8 demonstrates the performance of
808
+ different methods per ϵr over the MNIST dataset. This figure
809
+ shows that LSO with MOG initialization is effective even
+ for objects with large ϵr, which clearly signifies the power of
811
+ a data-driven initialization and performing the optimization in
812
+ the latent space.
813
+ VII. LIMITATIONS AND CONCLUSIONS
814
+ We proposed a learning-based framework for inverse
815
+ scattering using an injective prior. The proposed method fully
816
+ exploits the physical domain of the scattering problem while
817
+ benefiting from a data-driven initialization, which makes it a
818
+ powerful solver even for objects with a large contrast. The
819
+ invertible generator admits performing optimization in both
820
+ latent and data space and uses a data-driven or back-projection
821
+ as the initializer. We showed that performing optimization in
822
+ the latent space and using the mean of the Gaussian as the
823
+ initial guess significantly outperforms the traditional iterative
824
+ methods and even gives reconstructions comparable to the
825
+ successful supervised learning method, U-Net.
826
+ Limitations: The proposed framework has several limitations
827
+ that warrant discussion. It requires running an iterative method
828
+ in test-time, which is slow and cannot be used for real-time
829
+ applications. To speed up the convergence, one may consider a
830
+ more accurate initial guess by exploiting the physical domain
831
+ in the data-driven initializer; a combination of traditional back-
832
+ projection (like BP) and the data-driven initializers (like MOG).
833
+ Recently, Hussein et al. [31] optimized the generator weights
834
+ with a small rate after finding the optimal latent code in
835
+ Equation (14) to further improve the reconstructions. This
836
+ idea might be adopted in our framework to go beyond the quality
837
+ of the generator, but we leave it for future work.
838
+ REFERENCES
839
+ [1] N. K. Nikolova, “Microwave imaging for breast cancer,” IEEE microwave
840
+ magazine, vol. 12, no. 7, pp. 78–94, 2011. 1
841
+ [2] A. Friedman, “Application of inverse scattering to oil field evaluation
842
+ problems,” in Mathematics in Industrial Problems.
843
+ Springer, 1998, pp.
844
+ 169–178. 1
845
+ [3] R. Zoughi, Microwave non-destructive testing and evaluation principles.
846
+ Springer Science & Business Media, 2000, vol. 4. 1
847
+ [4] V. Khorashadi-Zadeh and M. Dehmollaian, “Through a cinder block wall
848
+ refocusing using sar back projection method,” IEEE Transactions on
849
+ Antennas and Propagation, vol. 67, no. 2, pp. 1212–1222, 2018. 1
850
+ [5] Y.-Q. Jin, Electromagnetic scattering modelling for quantitative remote
851
+ sensing.
852
+ World Scientific, 1993. 1
853
+ [6] A. I. Nachman, “Global uniqueness for a two-dimensional inverse
854
+ boundary value problem,” Annals of Mathematics, pp. 71–96, 1996.
855
+ 1
856
+ [7] X. Chen, Computational methods for electromagnetic inverse scattering.
857
+ John Wiley & Sons, 2018. 1, 3, 4
858
+ [8] Y. Wang and W. C. Chew, “An iterative solution of the two-dimensional
859
+ electromagnetic inverse scattering problem,” International Journal of
860
+ Imaging Systems and Technology, vol. 1, no. 1, pp. 100–108, 1989. 1
861
+ [9] W. C. Chew and Y.-M. Wang, “Reconstruction of two-dimensional
862
+ permittivity distribution using the distorted born iterative method,” IEEE
863
+ transactions on medical imaging, vol. 9, no. 2, pp. 218–225, 1990. 1, 6,
864
+ 8
865
+ [10] P. M. Van Den Berg and R. E. Kleinman, “A contrast source inversion
866
+ method,” Inverse problems, vol. 13, no. 6, p. 1607, 1997. 1, 6
867
+ [11] X. Chen, “Subspace-based optimization method for solving inverse-
868
+ scattering problems,” IEEE Transactions on Geoscience and Remote
869
+ Sensing, vol. 48, no. 1, pp. 42–49, 2009. 1
870
+ [12] Y. Khoo and L. Ying, “Switchnet: a neural network model for forward
871
+ and inverse scattering problems,” SIAM Journal on Scientific Computing,
872
+ vol. 41, no. 5, pp. A3182–A3201, 2019. 1
873
+ [13] P. Ran, Y. Qin, and D. Lesselier, “Electromagnetic imaging of a
874
+ dielectric micro-structure via convolutional neural networks,” in 2019
875
+ 27th European Signal Processing Conference (EUSIPCO).
876
+ IEEE, 2019,
877
+ pp. 1–5. 1, 5
878
+ [14] A. Khorashadizadeh, K. Kothari, L. Salsi, A. A. Harandi, M. de Hoop,
879
+ and I. Dokmani´c, “Conditional Injective Flows for Bayesian Imaging,”
880
+ arXiv preprint arXiv:2204.07664, 2022. 1, 3, 6
881
+ [15] L. Li, L. G. Wang, F. L. Teixeira, C. Liu, A. Nehorai, and T. J. Cui,
882
+ “DeepNIS: Deep neural network for nonlinear electromagnetic inverse
883
+ scattering,” IEEE Transactions on Antennas and Propagation, vol. 67,
884
+ no. 3, pp. 1819–1825, 2018. 1, 5
885
909
+ [16] Z. Wei and X. Chen, “Deep-learning schemes for full-wave nonlinear
910
+ inverse scattering problems,” IEEE Transactions on Geoscience and
911
+ Remote Sensing, vol. 57, no. 4, pp. 1849–1860, 2018. 1, 3, 5, 6
912
+ [17] J. E. Fajardo, J. Galv´an, F. Vericat, C. M. Carlevaro, and R. M. Irastorza,
913
+ “Phaseless microwave imaging of dielectric cylinders: An artificial neural
914
+ networks-based approach,” arXiv preprint arXiv:1908.10424, 2019. 1, 5
915
+ [18] A. Madry, A. Makelov, L. Schmidt, D. Tsipras, and A. Vladu, “Towards
916
+ deep learning models resistant to adversarial attacks,” arXiv preprint
917
+ arXiv:1706.06083, 2017. 1
918
+ [19] V. Antun, F. Renna, C. Poon, B. Adcock, and A. C. Hansen, “On
919
+ instabilities of deep learning in image reconstruction and the potential
920
+ costs of AI,” Proceedings of the National Academy of Sciences, vol. 117,
921
+ no. 48, pp. 30 088–30 095, 2020. 1
922
+ [20] X. Chen, Z. Wei, M. Li, and P. Rocca, “A review of deep learning
923
+ approaches for inverse scattering problems (invited review),” Progress
924
+ In Electromagnetics Research, vol. 167, pp. 67–81, 2020. 1
925
+ [21] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley,
926
+ S. Ozair, A. Courville, and Y. Bengio, “Generative adversarial nets,”
927
+ Advances in neural information processing systems, vol. 27, 2014. 2, 3
928
+ [22] A. Radford, L. Metz, and S. Chintala, “Unsupervised representation
929
+ learning with deep convolutional generative adversarial networks,” arXiv
930
+ preprint arXiv:1511.06434, 2015. 2, 3
931
+ [23] D. P. Kingma and M. Welling, “Auto-encoding variational Bayes,” arXiv
932
+ preprint arXiv:1312.6114, 2013. 2, 3
933
+ [24] L. Dinh, D. Krueger, and Y. Bengio, “Nice: Non-linear independent
934
+ components estimation,” arXiv preprint arXiv:1410.8516, 2014. 2, 3
935
+ [25] L. Dinh, J. Sohl-Dickstein, and S. Bengio, “Density estimation using
936
+ real nvp,” arXiv preprint arXiv:1605.08803, 2016. 2, 3
937
+ [26] D. P. Kingma and P. Dhariwal, “Glow: Generative flow with invertible
938
+ 1 × 1 convolutions,” Advances in neural information processing systems,
939
+ vol. 31, 2018. 2, 3
940
+ [27] J. Ho, A. Jain, and P. Abbeel, “Denoising diffusion probabilistic models,”
941
+ Advances in Neural Information Processing Systems, vol. 33, pp. 6840–
942
+ 6851, 2020. 2
943
+ [28] A. Bora, A. Jalal, E. Price, and A. G. Dimakis, “Compressed sensing using
944
+ generative models,” in International Conference on Machine Learning.
945
+ PMLR, 2017, pp. 537–546. 2, 3, 5
946
+ [29] V. A. Kelkar and M. Anastasio, “Prior image-constrained reconstruction
947
+ using style-based generative models,” in International Conference on
948
+ Machine Learning.
949
+ PMLR, 2021, pp. 5367–5377. 2
950
+ [30] T. Karras, S. Laine, and T. Aila, “A style-based generator architecture
951
+ for generative adversarial networks,” in Proceedings of the IEEE/CVF
952
+ conference on computer vision and pattern recognition, 2019, pp. 4401–
953
+ 4410. 2
954
+ [31] S. A. Hussein, T. Tirer, and R. Giryes, “Image-adaptive gan based
955
+ reconstruction,” in Proceedings of the AAAI Conference on Artificial
956
+ Intelligence, vol. 34, no. 04, 2020, pp. 3121��3129. 2, 8
957
+ [32] M. Asim, M. Daniels, O. Leong, A. Ahmed, and P. Hand, “Invertible
958
+ generative models for inverse problems: mitigating representation error
959
+ and dataset bias,” in International Conference on Machine Learning.
960
+ PMLR, 2020, pp. 399–409. 2, 3, 5, 6
961
+ [33] J. Whang, Q. Lei, and A. Dimakis, “Compressed sensing with invertible
962
+ generative models and dependent noise,” in NeurIPS 2020 Workshop on
963
+ Deep Learning and Inverse Problems, 2020. 2, 3, 5
964
+ [34] J. Brehmer and K. Cranmer, “Flows for simultaneous manifold learning
965
+ and density estimation,” Advances in Neural Information Processing
966
+ Systems, vol. 33, pp. 442–453, 2020. 2
967
+ [35] K. Kothari, A. Khorashadizadeh, M. de Hoop, and I. Dokmani´c,
968
+ “Trumpets: Injective flows for inference and inverse problems,” in
969
+ Uncertainty in Artificial Intelligence.
970
+ PMLR, 2021, pp. 1269–1278. 2,
971
+ 3, 4, 5, 6
972
+ [36] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks
973
+ for biomedical image segmentation,” in International Conference on
974
+ Medical image computing and computer-assisted intervention.
975
+ Springer,
976
+ 2015, pp. 234–241. 2, 3, 6, 8
977
+ [37] P. Y. Chen, D. J. Bergman, and Y. Sivan, “Spectral decomposition of
978
+ the lippmann-schwinger equation applied to cylinders,” arXiv preprint
979
+ arXiv:1705.01747, 2017. 2
980
+ [38] S. R. Rengarajan and Y. Rahmat-Samii, “The field equivalence principle:
981
+ Illustration of the establishment of the non-intuitive null fields,” IEEE
982
+ Antennas and Propagation Magazine, vol. 42, no. 4, pp. 122–128, 2000.
983
+ 2
984
+ [39] H. Levine and J. Schwinger, “On the theory of electromagnetic wave
985
+ diffraction by an aperture in an infinite plane conducting screen,”
986
+ Communications on Pure and Applied Mathematics, vol. 3, no. 4, pp.
987
+ 355–391, 1950. 2
988
+ [40] K. H. Jin, M. T. McCann, E. Froustey, and M. Unser, “Deep convolutional
989
+ neural network for inverse problems in imaging,” IEEE Transactions on
990
+ Image Processing, vol. 26, no. 9, pp. 4509–4522, 2017. 3
991
+ [41] C. M. Hyun, H. P. Kim, S. M. Lee, S. Lee, and J. K. Seo, “Deep learning
992
+ for undersampled mri reconstruction,” Physics in Medicine & Biology,
993
+ vol. 63, no. 13, p. 135007, 2018. 3
994
+ [42] N. Davoudi, X. L. De´an-Ben, and D. Razansky, “Deep learning
995
+ optoacoustic tomography with sparse data,” Nature Machine Intelligence,
996
+ vol. 1, no. 10, pp. 453–460, 2019. 3
997
+ [43] T. Liu, A. Chaman, D. Belius, and I. Dokmanic, “Learning multiscale
998
+ convolutional dictionaries for image reconstruction,” IEEE Transactions
999
+ on Computational Imaging, 2022. 3
1000
+ [44] A. Khorashadizadeh, A. Aghababaei, T. Vlaˇsi´c, H. Nguyen, and
1001
+ I. Dokmani´c, “Deep variational inverse scattering,” arXiv preprint
1002
+ arXiv:2212.04309, 2022. 3
1003
+ [45] J.-Y. Zhu, T. Park, P. Isola, and A. A. Efros, “Unpaired image-to-image
1004
+ translation using cycle-consistent adversarial networks,” in Proceedings
1005
+ of the IEEE international conference on computer vision, 2017, pp.
1006
+ 2223–2232. 3
1007
+ [46] Q. Liu, J. Xu, R. Jiang, and W. H. Wong, “Density estimation using
1008
+ deep generative neural networks,” Proceedings of the National Academy
1009
+ of Sciences, vol. 118, no. 15, 2021. 3
1010
+ [47] D. Rezende and S. Mohamed, “Variational inference with normalizing
1011
+ flows,” in International conference on machine learning.
1012
+ PMLR, 2015,
1013
+ pp. 1530–1538. 3
1014
+ [48] G. Papamakarios, E. Nalisnick, D. J. Rezende, S. Mohamed, and
1015
+ B. Lakshminarayanan, “Normalizing flows for probabilistic modeling
1016
+ and inference,” Journal of Machine Learning Research, vol. 22, no. 57,
1017
+ pp. 1–64, 2021. 3
1018
+ [49] G. Ongie, A. Jalal, C. A. Metzler, R. G. Baraniuk, A. G. Dimakis, and
1019
+ R. Willett, “Deep learning techniques for inverse problems in imaging,”
1020
+ IEEE Journal on Selected Areas in Information Theory, vol. 1, no. 1,
1021
+ pp. 39–56, 2020. 3
1022
+ [50] B. Kawar, M. Elad, S. Ermon, and J. Song, “Denoising diffusion
1023
+ restoration models,” arXiv preprint arXiv:2201.11793, 2022. 3
1024
+ [51] T. Vlaˇsi´c, H. Nguyen, A. Khorashadizadeh, and I. Dokmani´c, “Implicit
1025
+ neural representation for mesh-free inverse obstacle scattering,” arXiv
1026
+ preprint arXiv:2206.02027, 2022. 3
1027
+ [52] A. Khorashadizadeh, A. Chaman, V. Debarnot, and I. Dokmani´c,
1028
+ “Funknn: Neural interpolation for functional generation,” arXiv preprint
1029
+ arXiv:2212.14042, 2022. 3
1030
+ [53] H. Thanh-Tung and T. Tran, “Catastrophic forgetting and mode collapse
1031
+ in gans,” in 2020 International Joint Conference on Neural Networks
1032
+ (IJCNN).
1033
+ IEEE, 2020, pp. 1–10. 3
1034
+ [54] M. Arjovsky and L. Bottou, “Towards principled methods for training
1035
+ generative adversarial networks,” arXiv preprint arXiv:1701.04862, 2017.
1036
+ 3
1037
+ [55] J. Whang, Q. Lei, and A. Dimakis, “Solving inverse problems with
1038
+ a flow-based noise model,” in International Conference on Machine
1039
+ Learning.
1040
+ PMLR, 2021, pp. 11 146–11 157. 3
1041
+ [56] M. Puthawala, M. Lassas, I. Dokmani´c, and M. de Hoop, “Universal
1042
+ joint approximation of manifolds and densities by simple injective flows,”
1043
+ arXiv preprint arXiv:2110.04227, 2021. 4
1044
+ [57] K. Kothari, M. de Hoop, and I. Dokmani´c, “Learning the geometry
1045
+ of wave-based imaging,” Advances in Neural Information Processing
1046
+ Systems, vol. 33, pp. 8318–8329, 2020. 5
1047
+ [58] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,”
1048
+ arXiv preprint arXiv:1412.6980, 2014. 5
1049
+ [59] M. Abadi, P. Barham, J. Chen, Z. Chen, A. Davis, J. Dean, M. Devin,
1050
+ S. Ghemawat, G. Irving, M. Isard et al., “Tensorflow: A system for large-
1051
+ scale machine learning,” in 12th {USENIX} symposium on operating
1052
+ systems design and implementation ({OSDI} 16), 2016, pp. 265–283. 5
1053
+ [60] P. M. van den Berg, A. Van Broekhoven, and A. Abubakar, “Extended
1054
+ contrast source inversion,” Inverse problems, vol. 15, no. 5, p. 1325,
1055
+ 1999. 6
1056
+ [61] Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner, “Gradient-based learning
1057
+ applied to document recognition,” Proceedings of the IEEE, vol. 86,
1058
+ no. 11, pp. 2278–2324, 1998. 6
1059
+
-dE1T4oBgHgl3EQfUgPr/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
.gitattributes CHANGED
@@ -4912,3 +4912,102 @@ StAzT4oBgHgl3EQfJPvm/content/2301.01078v1.pdf filter=lfs diff=lfs merge=lfs -tex
4912
  2tFRT4oBgHgl3EQfnjcF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4913
  5dE2T4oBgHgl3EQf6wiO/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4914
  D9E4T4oBgHgl3EQffA2Y/content/2301.05104v1.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4912
  2tFRT4oBgHgl3EQfnjcF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4913
  5dE2T4oBgHgl3EQf6wiO/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4914
  D9E4T4oBgHgl3EQffA2Y/content/2301.05104v1.pdf filter=lfs diff=lfs merge=lfs -text
4915
+ utAyT4oBgHgl3EQfm_iM/content/2301.00481v1.pdf filter=lfs diff=lfs merge=lfs -text
4916
+ 19A0T4oBgHgl3EQfMv_l/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4917
+ atE_T4oBgHgl3EQfyxzf/content/2301.08320v1.pdf filter=lfs diff=lfs merge=lfs -text
4918
+ pNAyT4oBgHgl3EQfzPn6/content/2301.00700v1.pdf filter=lfs diff=lfs merge=lfs -text
4919
+ 59E3T4oBgHgl3EQfpgot/content/2301.04642v1.pdf filter=lfs diff=lfs merge=lfs -text
4920
+ N9E4T4oBgHgl3EQf9g6Z/content/2301.05356v1.pdf filter=lfs diff=lfs merge=lfs -text
4921
+ 3tFST4oBgHgl3EQfZDim/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4922
+ ZtE1T4oBgHgl3EQfcgT-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4923
+ ltE3T4oBgHgl3EQf6Avi/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4924
+ ttE0T4oBgHgl3EQf9QKd/content/2301.02799v1.pdf filter=lfs diff=lfs merge=lfs -text
4925
+ p9A0T4oBgHgl3EQfKf-T/content/2301.02105v1.pdf filter=lfs diff=lfs merge=lfs -text
4926
+ StAzT4oBgHgl3EQfJPvm/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4927
+ t9E3T4oBgHgl3EQf9wvz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4928
+ 7tAyT4oBgHgl3EQfQvZV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4929
+ JNFJT4oBgHgl3EQfGSzR/content/2301.11447v1.pdf filter=lfs diff=lfs merge=lfs -text
4930
+ TdFLT4oBgHgl3EQfQS8Y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4931
+ TdFLT4oBgHgl3EQfQS8Y/content/2301.12031v1.pdf filter=lfs diff=lfs merge=lfs -text
4932
+ ltE3T4oBgHgl3EQf6Avi/content/2301.04787v1.pdf filter=lfs diff=lfs merge=lfs -text
4933
+ D9E1T4oBgHgl3EQf-QZ6/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4934
+ WNAzT4oBgHgl3EQfYPzf/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4935
+ 5NE1T4oBgHgl3EQfBAIU/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4936
+ kdAyT4oBgHgl3EQfyPmS/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4937
+ ttE0T4oBgHgl3EQf9QKd/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4938
+ D9E4T4oBgHgl3EQffA2Y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4939
+ 1tA0T4oBgHgl3EQfMv-f/content/2301.02137v1.pdf filter=lfs diff=lfs merge=lfs -text
4940
+ WNAzT4oBgHgl3EQfYPzf/content/2301.01333v1.pdf filter=lfs diff=lfs merge=lfs -text
4941
+ U9E5T4oBgHgl3EQfBQ5V/content/2301.05385v1.pdf filter=lfs diff=lfs merge=lfs -text
4942
+ otE0T4oBgHgl3EQfqgEA/content/2301.02552v1.pdf filter=lfs diff=lfs merge=lfs -text
4943
+ n9AyT4oBgHgl3EQfy_mq/content/2301.00695v1.pdf filter=lfs diff=lfs merge=lfs -text
4944
+ b9E0T4oBgHgl3EQfnwFc/content/2301.02516v1.pdf filter=lfs diff=lfs merge=lfs -text
4945
+ 2dAyT4oBgHgl3EQfbvf7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4946
+ VNFJT4oBgHgl3EQf3S18/content/2301.11661v1.pdf filter=lfs diff=lfs merge=lfs -text
4947
+ FdE2T4oBgHgl3EQfTAdd/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4948
+ ZNE1T4oBgHgl3EQfwQX2/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4949
+ VNFJT4oBgHgl3EQf3S18/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4950
+ U9E5T4oBgHgl3EQfBQ5V/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4951
+ 79E3T4oBgHgl3EQfRwk1/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4952
+ pNAyT4oBgHgl3EQfzPn6/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4953
+ gtAzT4oBgHgl3EQfavzP/content/2301.01375v1.pdf filter=lfs diff=lfs merge=lfs -text
4954
+ c9E1T4oBgHgl3EQfdwSQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4955
+ 59E3T4oBgHgl3EQfpgot/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4956
+ atE_T4oBgHgl3EQfyxzf/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4957
+ ZNE1T4oBgHgl3EQfwQX2/content/2301.03410v1.pdf filter=lfs diff=lfs merge=lfs -text
4958
+ JNFJT4oBgHgl3EQfGSzR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4959
+ C9E1T4oBgHgl3EQf-AYx/content/2301.03562v1.pdf filter=lfs diff=lfs merge=lfs -text
4960
+ MNE3T4oBgHgl3EQfYgqz/content/2301.04489v1.pdf filter=lfs diff=lfs merge=lfs -text
4961
+ XdE0T4oBgHgl3EQfWACD/content/2301.02272v1.pdf filter=lfs diff=lfs merge=lfs -text
4962
+ 7tAyT4oBgHgl3EQfQvZV/content/2301.00051v1.pdf filter=lfs diff=lfs merge=lfs -text
4963
+ N9AzT4oBgHgl3EQfzP7F/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4964
+ YdFRT4oBgHgl3EQf_jiA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4965
+ N9E4T4oBgHgl3EQf9g6Z/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4966
+ R9E0T4oBgHgl3EQfkwEq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4967
+ oNAzT4oBgHgl3EQfOPvq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4968
+ C9E1T4oBgHgl3EQf-AYx/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4969
+ idE0T4oBgHgl3EQfpwEJ/content/2301.02542v1.pdf filter=lfs diff=lfs merge=lfs -text
4970
+ U9AzT4oBgHgl3EQfX_y1/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4971
+ 1tA0T4oBgHgl3EQfMv-f/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4972
+ XNE1T4oBgHgl3EQfvwUc/content/2301.03402v1.pdf filter=lfs diff=lfs merge=lfs -text
4973
+ edE4T4oBgHgl3EQfqA0r/content/2301.05196v1.pdf filter=lfs diff=lfs merge=lfs -text
4974
+ utFJT4oBgHgl3EQfeSy4/content/2301.11552v1.pdf filter=lfs diff=lfs merge=lfs -text
4975
+ mNFST4oBgHgl3EQfKDg1/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4976
+ 7dE0T4oBgHgl3EQffQBI/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4977
+ oNAzT4oBgHgl3EQfOPvq/content/2301.01164v1.pdf filter=lfs diff=lfs merge=lfs -text
4978
+ S9FAT4oBgHgl3EQf2B5J/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4979
+ MNE3T4oBgHgl3EQfYgqz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4980
+ idE0T4oBgHgl3EQfpwEJ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4981
+ YtAyT4oBgHgl3EQfiPjT/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4982
+ v9E_T4oBgHgl3EQf_ByN/content/2301.08390v1.pdf filter=lfs diff=lfs merge=lfs -text
4983
+ 3NFKT4oBgHgl3EQfQi0f/content/2301.11767v1.pdf filter=lfs diff=lfs merge=lfs -text
4984
+ W9AyT4oBgHgl3EQfh_hF/content/2301.00386v1.pdf filter=lfs diff=lfs merge=lfs -text
4985
+ RNAyT4oBgHgl3EQfU_fg/content/2301.00137v1.pdf filter=lfs diff=lfs merge=lfs -text
4986
+ W9AyT4oBgHgl3EQfh_hF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4987
+ jdE2T4oBgHgl3EQfdQcV/content/2301.03903v1.pdf filter=lfs diff=lfs merge=lfs -text
4988
+ 7dE0T4oBgHgl3EQffQBI/content/2301.02401v1.pdf filter=lfs diff=lfs merge=lfs -text
4989
+ YtAyT4oBgHgl3EQfiPjT/content/2301.00393v1.pdf filter=lfs diff=lfs merge=lfs -text
4990
+ 3NFKT4oBgHgl3EQfQi0f/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4991
+ idA0T4oBgHgl3EQfIf9D/content/2301.02075v1.pdf filter=lfs diff=lfs merge=lfs -text
4992
+ p9A0T4oBgHgl3EQfKf-T/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4993
+ wtFPT4oBgHgl3EQf_TXz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4994
+ otE0T4oBgHgl3EQfqgEA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4995
+ RNAyT4oBgHgl3EQfU_fg/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4996
+ edE4T4oBgHgl3EQfqA0r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4997
+ t9E5T4oBgHgl3EQfnA8B/content/2301.05682v1.pdf filter=lfs diff=lfs merge=lfs -text
4998
+ v9E_T4oBgHgl3EQf_ByN/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4999
+ idA0T4oBgHgl3EQfIf9D/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
5000
+ 8dE1T4oBgHgl3EQfngSj/content/2301.03310v1.pdf filter=lfs diff=lfs merge=lfs -text
5001
+ 69E4T4oBgHgl3EQf2A2F/content/2301.05295v1.pdf filter=lfs diff=lfs merge=lfs -text
5002
+ b9E0T4oBgHgl3EQfnwFc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
5003
+ UtE2T4oBgHgl3EQfCwbV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
5004
+ b9E1T4oBgHgl3EQfxQXl/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
5005
+ XNE1T4oBgHgl3EQfvwUc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
5006
+ wNE4T4oBgHgl3EQfXQy1/content/2301.05040v1.pdf filter=lfs diff=lfs merge=lfs -text
5007
+ jdE2T4oBgHgl3EQfdQcV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
5008
+ stE5T4oBgHgl3EQfKg5W/content/2301.05466v1.pdf filter=lfs diff=lfs merge=lfs -text
5009
+ KtAzT4oBgHgl3EQfkP1s/content/2301.01528v1.pdf filter=lfs diff=lfs merge=lfs -text
5010
+ B9E4T4oBgHgl3EQfeA2w/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
5011
+ 69E4T4oBgHgl3EQf2A2F/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
5012
+ c9E1T4oBgHgl3EQfdwSQ/content/2301.03199v1.pdf filter=lfs diff=lfs merge=lfs -text
5013
+ rNFKT4oBgHgl3EQf0S6b/content/2301.11915v1.pdf filter=lfs diff=lfs merge=lfs -text
0NFJT4oBgHgl3EQfjiwF/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dccc4aa62da28fccb7f49b027d5fc81c21ec44df02ac64a76a00b4fb793d8874
3
+ size 241892
0dAyT4oBgHgl3EQfbfdc/content/tmp_files/2301.00263v1.pdf.txt ADDED
@@ -0,0 +1,749 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.00263v1 [math.CA] 31 Dec 2022
2
+ ON BOCHNER’S ALMOST-PERIODICITY CRITERION
3
+ PHILIPPE CIEUTAT
4
+ Abstract. We give an extension of Bochner’s criterion for the almost periodic func-
5
+ tions. By using our main result, we extend two results of A. Haraux. The first is a
6
+ generalization of Bochner’s criterion which is useful for periodic dynamical systems. The
7
+ second is a characterization of periodic functions in terms of Bochner’s criterion.
8
+ 2020 Mathematics Subject Classification: 35B10, 35B40, 42A75, 47H20.
9
+ Keywords: Bochner almost periodicity, periodic function, almost periodic function,
10
+ asymptotically almost periodic function, nonlinear semigroup, periodic dynamical sys-
11
+ tem.
12
+ 1. Introduction
13
+ The almost periodic functions in the sense of Bohr have been characterized by Bochner
14
+ by means of a compactness criterion in the space of the bounded and continuous functions
15
+ [2, 3]. The Bochner’s criterion plays an essential role in the theory and in applications.
16
+ We give a new almost-periodicity criterion for functions with values in a given complete
17
+ metric space which is useful to study the almost periodicity of solutions of dynamical
18
+ systems governed by a family of operators with a positive parameter. This criterion is an
19
+ extension of Bochner’s criterion. Then Haraux gave a generalization of Bochner’s criterion
20
+ [9, Theorem 1], called a simple almost-periodicity criterion which is useful for periodic
21
+ dynamical systems. From our result, we deduce an extension of this criterion. We also
22
+ obtain an extension of another result of Haraux which characterizes the periodic functions
23
+ in terms of the Bochner’s criterion [8]. In the same spirit, we treat the asymptotically
24
+ almost periodic case.
25
+ We now give a description of this article; the precise definitions will be given in Section 2.
26
+ Throughout this section (X, d) is a complete metric space. An almost periodic function
27
+ u : R → X in the sense of Bohr is characterized by the Bochner’s criterion which is the
28
+ following: u is bounded and continuous, and from any sequence of real numbers (τn)n,
29
+ there exists a subsequence (τφ(n))n such that the sequence of functions (u(t + τφ(n)))n is
30
+ uniformly convergent on R. In Section 3, we give two extensions of Bochner’s criterion.
31
+ First, u : R → X is almost periodic if and only if, in the Bochner’s criterion, we impose
32
+ that the terms of the sequence of real numbers (τn)n are all positive. Second u : R → X
33
+ is almost periodic if and only if, in the Bochner’s criterion, the convergence of the
34
+ subsequence of functions (u(t + τφ(n)))n is uniform only on [0, +∞). These improvements
35
+ are useful to study the almost periodicity of solutions of an evolution equation governed by
36
+ a family of operators with a positive parameter, in particular for a C0-semigroup of linear
37
+ operators or more generally, for an autonomous dynamical system (nonlinear semigroup).
38
+ Universit´e Paris-Saclay, UVSQ, CNRS, Laboratoire de math´ematiques de Versailles, 78000, Versailles,
39
+ France. E-mail address: [email protected]
40
44
+ From our extension of Bochner’s criterion, we give new proofs which are direct and simpler
45
+ on known results on the almost periodicity of solutions of autonomous dynamic systems.
46
+ Haraux gave a generalization of Bochner’s criterion called the simple almost-periodicity
47
+ criterion [9, Theorem 1]. This criterion makes it possible to choose in the Bochner’s cri-
48
+ terion, the sequence of real numbers (τn)n in a set of the type ωZ which is very useful
49
+ for periodic dynamical systems. From our extension of Bochner’s criterion, in Section
50
+ 4, we deduce an improvement of this result. An asymptotically almost periodic function
51
+ u : R+ → X is a perturbation of an almost periodic function. Such a function is characterized by a
52
+ property of the type of the Bochner’s criterion. In the same spirit, we extend this char-
53
+ acterization of asymptotically almost periodic functions. Then we apply these results to
54
+ study the almost periodicity of solutions of periodic dynamical systems.
55
+ Bochner’s criterion can also be expressed in terms of the relative compactness of the
56
+ set {u(· + τ); τ ∈ R} in a suitable set of continuous functions. A periodic function is a
57
+ special case of almost periodic function. A direct consequence of [8, Proposition 2] given
58
+ by Haraux characterizes a periodic function in terms of the Bochner’s criterion. This
59
+ characterization is the following: a continuous function u : R → X is periodic if and only if the
60
+ set {u(· + τ); τ ∈ R} is compact. In Section 5, by using our improvement of Bochner’s
61
+ criterion, we give an extension of the Haraux’s characterization of periodic functions. We
62
+ will also give a result on asymptotically periodic functions of the type of Haraux result
63
+ described above. Then we apply these results to study the periodicity of solutions of
64
+ autonomous dynamical systems.
65
+ 2. Notation
66
+ Let us now give some notations, definitions and properties which will be used.
67
+ Throughout this section (X, d) is a complete metric space.
68
+ R, Z and N stand re-
69
+ spectively for the real numbers, the integers and the natural integers.
70
+ We denote by
71
+ R+ := {t ∈ R; t ≥ 0}.
72
+ Let E be a topological space.
73
+ We denote by C(E, X) the
74
+ space of all continuous functions from E into X.
75
+ When J = R or J = R+, we de-
76
+ note by BC(J, X) the space of all bounded and continuous functions from J into X
77
+ equipped with the sup-distance, denoted by d∞(u, v) := sup_{t∈R} d(u(t), v(t)) when J = R and
+ d∞,+(u, v) := sup_{t≥0} d(u(t), v(t)) when J = R+, for u, v ∈ BC(J, X). The metric spaces
+ (BC(R, X), d∞) and (BC(R+, X), d∞,+) are complete.
84
+ We now give some definitions and properties on almost periodic, asymptotically almost
85
+ periodic functions with values in a given complete metric space.
86
+ A subset D of R (respectively of R+) is said to be relatively dense if there exists ℓ > 0
87
+ such that D ∩ [α, α + ℓ] ̸= ∅ for all α ∈ R (respectively α ≥ 0). A continuous function
88
+ u : R → X is said to be almost periodic (in the sense of Bohr) if for each ε > 0,
89
+ the set of ε-almost periods P(u, ε) = {τ ∈ R ; sup_{t∈R} d(u(t + τ), u(t)) ≤ ε} is relatively
+ dense in R. An almost periodic function u has its range u(R) relatively compact, that
97
+ is its closure denoted by cl (u(R)) is a compact set of (X, d). We denote the space of
98
+ all such functions by AP(R, X). It is a closed metric subspace of (BC(R, X), d∞). An
99
+ almost periodic function u is uniformly recurrent, that is there exists a sequence of real
100
+ numbers (τn)n such that lim_{n→+∞} sup_{t∈R} d(u(t + τn), u(t)) = 0 and lim_{n→+∞} τn = +∞. To see
+ this, consider the Bohr definition of u ∈ AP(R, X); then the set of 1/n-almost periods
+ satisfies P(u, 1/n) ∩ [n, +∞) ̸= ∅, for each integer n > 0. A useful characterization of almost
115
+ periodic functions was given by Bochner. Bochner’s criterion may be found in
+ [12, Bochner’s theorem, p. 4] in the context of metric spaces. Before stating this criterion,
117
+ we need to introduce the translation mapping of a function of BC(R, X). For τ ∈ R and
118
+ u ∈ BC(R, X), we define the translation mapping Tτu ∈ BC(R, X) by Tτu(t) = u(t + τ)
119
+ for t ∈ R.
120
+ Theorem 2.1 (Bochner’s criterion). For u ∈ BC(R, X), the following statements are
121
+ equivalent.
122
+ i) u ∈ AP(R, X).
123
+ ii) The set {Tτu; τ ∈ R} is relatively compact in (BC(R, X), d∞).
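+ As a standard illustration (with X = R and its usual distance; this example is not used in the
+ sequel): the function u(t) = sin t + sin(√2 t) is almost periodic but not periodic, since the periods
+ 2π and 2π/√2 of the two terms are incommensurable. By Bochner’s criterion the set {Tτu; τ ∈ R}
+ is then relatively compact in (BC(R, R), d∞), although, by Theorem 2.3 below, it is not compact.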
124
+ Haraux gave a generalization of Bochner’s criterion which he called a simple almost-periodicity
125
+ criterion [9, Theorem 1] which is useful for periodic dynamical systems.
126
+ Theorem 2.2 (Haraux’s criterion). Let D be a relatively dense subset of R. The following
127
+ statements are equivalent for u ∈ BC(R, X).
128
+ i) u ∈ AP(R, X).
129
+ ii) The set {Tτu; τ ∈ D} is relatively compact in (BC(R, X), d∞).
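+ For instance, for any ω > 0 the set D = ωZ is relatively dense in R (one may take ℓ = ω in the
+ definition above), so Haraux’s criterion only requires the relative compactness of the translates
+ Tnωu, n ∈ Z; this is the situation exploited for ω-periodic processes in Section 4.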
130
+ Periodic functions, which are a special case of almost periodic functions, are also char-
131
+ acterized in terms of Bochner’s criterion. This criterion is a direct consequence of a result
132
+ of Haraux.
133
+ Theorem 2.3. [8, Consequence of Proposition 2] The following statements are equivalent
134
+ for u ∈ BC(R, X).
135
+ i) u is periodic.
136
+ ii) The set {Tτu; τ ∈ R} is a compact set of (BC(R, X), d∞).
137
+ For some preliminary results on almost periodic functions with values in a given com-
138
+ plete metric space, we refer to the book of Levitan-Zhikov [12] and in the special case of
139
+ Banach spaces to the book of Amerio-Prouse [1].
140
+ The notion of asymptotic almost periodicity was first introduced by Fr´echet [6] in 1941
141
+ in the case where X = C. A continuous function u : R+ → X is said to be asymp-
+ totically almost periodic if there exists v ∈ AP(R, X) such that lim_{t→+∞} d(u(t), v(t)) = 0.
+ An asymptotically almost periodic function u has its range u(R+) relatively compact. We
145
+ denote the space of all such functions by AAP(R+, X). It is a closed metric subspace
146
+ of (BC(R+, X), d∞,+). An asymptotically almost periodic function u : R+ → X is char-
+ acterized as follows: u ∈ AAP(R+, X) if and only if u ∈ C(R+, X) and for each ε > 0, there
+ exists M ≥ 0 such that the set {τ ≥ 0 ; sup_{t≥M} d(u(t + τ), u(t)) ≤ ε} is relatively dense in R+
155
+ [15, Theorems 1.3]. In the context of metric spaces, Ruess and Summers give a charac-
156
+ terization of asymptotically almost periodic functions in the spirit of Bochner’s criterion.
157
+ To prove this characterization, Ruess and Summers use results from the paper [16] by
161
+ the same authors. For τ ≥ 0 and u ∈ BC(R+, X), we define the translation mapping
162
+ T +
163
+ τ u ∈ BC(R+, X) by T +
164
+ τ u(t) = u(t + τ) for t ≥ 0.
165
+ Theorem 2.4. [15, a part of Theorems 1.2 & 1.3] Let (X, d) be a complete metric space.
166
+ For u ∈ BC(R+, X), the following statements are equivalent.
167
+ i) u ∈ AAP(R+, X).
168
+ ii) The set {T +
169
+ τ u; τ ≥ 0} is relatively compact in (BC(R+, X), d∞,+).
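+ A simple illustration (with X = R): u(t) = sin t + exp(−t) is asymptotically almost periodic, with
+ v(t) = sin t ∈ AP(R, R) and d(u(t), v(t)) = exp(−t) → 0 as t → +∞; accordingly its translates
+ u(· + τ), τ ≥ 0, form a relatively compact subset of (BC(R+, R), d∞,+).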
170
+ For some preliminary results on asymptotically almost periodic functions, we refer to
171
+ the book of Yoshizawa [17] in the case where X is a finite dimensional space, to the book
172
+ of Zaidman [18] where X is a Banach space and to Ruess and Summers [14, 15, 16] in the
173
+ general case: X is a complete metric space.
174
+ 3. An improvement of Bochner’s criterion
175
+ An almost periodic function is characterized by the Bochner’s criterion, recalled in Sec-
176
+ tion 2. Our main result is an extension of Bochner’s criterion. Then we deduce new,
+ direct and simpler proofs of known results on the solutions of autonomous dynamical
+ systems. Before stating our extension of Bochner’s criterion, we need to introduce the
179
+ restriction operator R : BC(R, X) → BC(R+, X) defined by R(u)(t) := u(t) for t ≥ 0
180
+ and u ∈ BC(R, X).
181
+ Theorem 3.1. Let (X, d) be a complete metric space. For u ∈ BC(R, X) the following
182
+ statements are equivalent.
183
+ i) u ∈ AP(R, X).
184
+ ii) The set {Tτu; τ ≥ 0} is relatively compact in (BC(R, X), d∞).
185
+ iii) The set {R(Tτu); τ ∈ R} is relatively compact in (BC(R+, X), d∞,+).
186
+ In our results, the compactness and the relative compactness of a set often intervene.
187
+ To prove them, we will often use the following result whose proof is obvious. Recall that
188
+ a set A of a metric space (E, d) is relatively compact if its closure denoted by cl (A) is a
189
+ compact set of (E, d).
190
+ Lemma 3.2. Let E be a set, (G1, d1) and (G2, d2) be two metric spaces. Let u : E → G1
191
+ and v : E → G2 be two functions. Assume there exists M > 0 such that
192
+ ∀x1, x2 ∈ E,
193
+ d1(u(x1), u(x2)) ≤ Md2(v(x1), v(x2)).
194
+ Then the following statements hold.
195
+ i) If the metric space (G1, d1) is complete and v(E) is relatively compact in (G2, d2),
196
+ then u(E) is relatively compact in (G1, d1).
197
+ ii) If v(E) is a compact set of (G2, d2), then u(E) is a compact set of (G1, d1).
198
+ Proof of Theorem 3.1. i) =⇒ iii). It is obvious by using the Bochner’s criterion and the
199
+ continuity of the restriction operator R.
200
+ iii) =⇒ ii). The set u(R) = {R(Tτu)(0); τ ∈ R} is relatively compact in X as the
201
+ range of {R(Tτu); τ ∈ R} by the continuous evaluation map at 0 from BC(R+, X) into
202
+ X. By assumption, H := cl ({R(Ttu) ; t ∈ R}) is a compact set of (BC(R+, X), d∞,+).
203
+ For all τ ≥ 0, we define φτ : H → X by φτ(h) = h(τ). The functions φτ are 1-Lipschitz
207
+ continuous and for each t ∈ R, the set {φτ(R(Ttu)) = u(τ + t) ; τ ≥ 0} is included in the
208
+ relatively compact set u(R). By density of {R(Ttu) ; t ∈ R} in H and the continuity of
209
+ φτ, it follows that {φτ(h) ; τ ≥ 0} is relatively compact in X for each h ∈ H. According
210
+ to Arzel`a-Ascoli’s theorem [11, Theorem 3.1, p. 57], the set {φτ ; τ ≥ 0} is relatively
211
+ compact in C(H, X) equipped with the sup-norm denoted by dC.
212
+ From the density
213
+ of {R(Ttu) ; t ∈ R} in H and the continuity of φτ, we deduce that for τ1 and τ2 ≥ 0,
214
+ sup_{h∈H} d(φτ1(h), φτ2(h)) = sup_{t∈R} d(φτ1(R(Ttu)), φτ2(R(Ttu))) = sup_{t∈R} d(u(τ1 + t), u(τ2 + t)) =
+ sup_{t∈R} d(Tτ1u(t), Tτ2u(t)), then dC(φτ1, φτ2) = d∞(Tτ1u, Tτ2u). From Lemma 3.2, it follows
224
+ that {Tτu; τ ≥ 0} is relatively compact in the complete metric space (BC(R, X), d∞)
225
+ since {φτ ; τ ≥ 0} is also one in (C(H, X), dC).
226
+ ii) =⇒ i). For τ1, τ2 ≥ 0, d∞(Tτ1u, Tτ2u) := sup_{t∈R} d(u(τ1 + t), u(τ2 + t)). Replacing t
229
+ by t − τ1 − τ2 in the upper bound, we get d∞(Tτ1u, Tτ2u) = d∞(T−τ1u, T−τ2u). Then the
230
+ set {Tτu; τ ≤ 0} = {T−τu; τ ≥ 0} is relatively compact in BC(R, X) since {Tτu; τ ≥ 0}
231
+ is also one. Therefore the set {Tτu; τ ∈ R} is relatively compact in BC(R, X) as the
232
+ union of two relatively compact sets in BC(R, X).
233
+ According to Bochner’s criterion,
234
+ u ∈ AP(R, X).
235
+
236
+ The connection between the almost periodicity of a solution of a dynamical system and
237
+ its stability is well known (see the monograph by Nemytskii & Stepanov [13, Ch. 5]. This
238
+ weakened form of Bochner’s criterion: Theorem 3.1 makes it possible to obtain direct and
239
+ simpler proofs on these questions. Let us start by recalling some definitions on dynamical
240
+ systems.
241
+ A dynamical system or nonlinear semigroup on a complete metric space (X, d) is a one
242
+ parameter family (S(t))t≥0 of maps from X into itself such that i) S(t) ∈ C(X, X) for all
243
+ t ≥ 0, ii) S(0)x = x for all x ∈ X, iii) S(t + s) = S(t) ◦ S(s) for all s, t ≥ 0 and iv) the
244
+ mapping S(·)x ∈ C([0, +∞), X) for all x ∈ X.
245
+ For each x ∈ X, the positive trajectory of x is the map S(·)x : R+ → X. A function
246
+ u : R → X is called a complete trajectory if we have u(t + τ) = S(τ)u(t), for all t ∈ R
247
+ and τ ≥ 0.
248
+ We will need a notion of Lagrange-type stability to ensure that a solution with a
249
+ relatively compact range is almost periodic. Recall that (S(t))t≥0 is equicontinuous on a
250
+ compact set K of X, if for all ε > 0, there exists δ > 0 such that
+ ∀x1, x2 ∈ K, d(x1, x2) ≤ δ =⇒ sup_{t≥0} d(S(t)x1, S(t)x2) ≤ ε.
254
+ Using Theorem 3.1, we give a new, direct and simpler proof of the following
255
+ result which can be found in [10, Theorem 4.3.2, p. 51] or partly in [12, Markov’s theorem,
256
+ p. 10].
257
+ Corollary 3.3. Let (S(t))t≥0 be a dynamical system on a complete metric space (X, d)
258
+ and u be a complete trajectory such that u(R) is relatively compact. Then u is almost
259
+ periodic if and only if (S(t))t≥0 is equicontinuous on cl (u(R)) the closure of u(R).
260
+ Proof. Let us denote the compact set K := cl (u(R)). It follows by density of u(R) in
261
+ K and the continuity of S(t), that {S(t)x; t ≥ 0} ⊂ K for each x ∈ K.
262
+ According
263
+ to Arzel`a-Ascoli’s theorem, (S(t))t≥0 is equicontinuous on K if and only if (S(t))t≥0 is
267
+ relatively compact in C(K, X). From Theorem 3.1, we have u ∈ AP(R, X) if and only if
268
+ {Tτu; τ ≥ 0} is relatively compact in BC(R, X). Then it remains to prove that (S(t))t≥0
269
+ is relatively compact in C(K, X) equipped with the sup-norm if and only if {Tτu; τ ≥ 0}
270
+ is relatively compact in (BC(R, X), d∞). This results from the following equalities, for τ1
271
+ and τ2 ≥ 0, sup
272
+ t∈R
273
+ d (Tτ1u(t), Tτ2u(t)) = sup
274
+ t∈R
275
+ d (S(τ1)u(t), S(τ2)u(t)) = sup
276
+ x∈K
277
+ d (S(τ1)x, S(τ2)x)
278
+ and Lemma 3.2.
279
+
280
+ Remark 3.4. a) The condition of equicontinuity required by Corollary 3.3 is satisfied by
281
+ a bounded dynamical system : d (S(t)x1, S(t)x2) ≤ Md (x1, x2) for some M ≥ 1 and
282
+ in particular for a C0 semigroup of contractions. In this case, the almost periodicity of
283
+ a complete trajectory u having a relatively compact range results from Corollary 3.3.
284
+ We can also obtain this result with the implication iii) =⇒ i) of Theorem 3.1 and the
285
+ inequality sup_{t≥0} d(R(Tτ1u)(t), R(Tτ2u)(t)) = sup_{t≥0} d(S(t)u(τ1), S(t)u(τ2)) ≤ Md(u(τ1), u(τ2))
290
+ for τ1, τ2 ∈ R.
291
+ b) For a bounded C0-semigroup (S(t))t≥0, the main result of Zaidman [19] asserts
292
+ that a positive trajectory u with relatively compact range satisfies a condition called the
293
+ generalized normality property in Bochner’s sense, without concluding that u is almost
294
+ periodic. This condition is nothing but hypothesis iii) of Theorem 3.1, so u is almost
295
+ periodic.
296
+ Using Theorem 2.4, we give a new, direct and simpler proof of the following
297
+ result which can be found in [15, Theorem 2.2, p. 149].
298
+ Corollary 3.5. Let (S(t))t≥0 be a dynamical system on a complete metric space (X, d) and
299
+ u be a positive trajectory such that u(R+) is relatively compact. Then u is asymptotically
300
+ almost periodic if and only if (S(t))t≥0 is equicontinuous on cl (u(R+)).
301
+ Proof. The proof is analogous to that of Corollary 3.3, using Theorem 2.4 instead of
302
+ Theorem 3.1 and by replacing R by R+ and AP(R, X) by AAP(R+, X).
303
+
304
+ 4. An improvement of Haraux’s criterion
305
+ Haraux gave a generalization of Bochner’s criterion [9, Theorem 1], which he called a simple
306
+ almost-periodicity criterion which is useful for periodic dynamical systems.
307
+ From our
308
+ main result, Theorem 3.1, we deduce an extension of the Haraux’s criterion, recalled in
309
+ Section 2. In the same spirit, we extend the well-known characterization of asymptotically
310
+ almost periodic functions. To end this section, we give an example of application to a
311
+ periodic dynamical system.
312
+ We give an extension of the Haraux’s criterion (see Theorem 2.2). Recall that we denote
313
+ by R the restriction operator R : BC(R, X) → BC(R+, X) defined by R(u)(t) := u(t) for
314
+ t ≥ 0 and u ∈ BC(R, X).
315
+ Corollary 4.1. Let (X, d) be a complete metric space. For u ∈ BC(R, X) the following
316
+ statements are equivalent.
317
+ i) u ∈ AP(R, X).
318
+ ii) The set {Tτu; τ ∈ D} is relatively compact in (BC(R, X), d∞) where D is a relatively
319
+ dense subset of R+.
320
+ iii) The set {R(Tτu); τ ∈ D} is relatively compact in (BC(R+, X), d∞,+) where D is a
324
+ relatively dense subset of R.
325
+ Remark 4.2. Our main result, Theorem 3.1 is obviously a particular case of Corollary 4.1.
326
+ But to present our results, it was easier to start with Theorem 3.1. To prove Corollary
327
+ 4.1, we use Haraux’s criterion and Theorem 3.1.
328
+ Proof of Corollary 4.1. i) =⇒ iii). It is a consequence of Theorem 3.1.
329
+ iii) =⇒ ii). To establish this implication, using Theorem 3.1, it suffices to show that
330
+ assertion iii) implies that {R(Tτu); τ ∈ R} is relatively compact in BC(R+, X). The proof
331
+ of this last implication is a slight adaptation of that of Haraux’s criterion
332
+ given in [9, Theorem 1]. A similar proof will be detailed in the following result as there
333
+ will be technical issues. To demonstrate that {R(Tτu); τ ∈ R} is relatively compact, it
334
+ suffices in the proof of ii) =⇒ i) of Corollary 4.3 to take ℓ = 0 and replace {T +
335
+ τ u; τ ∈ D}
336
+ by {R(Tτu); τ ∈ D}.
337
+ ii) =⇒ i). For τ1, τ2 ≥ 0, d∞(Tτ1u, Tτ2u) = sup_{t∈R} d(u(τ1 + t), u(τ2 + t)). Replacing t by
340
+ t − τ1 − τ2 in the upper bound, we get d∞(Tτ1u, Tτ2u) = d∞(T−τ1u, T−τ2u). Then the set
341
+ {Tτu; τ ∈ −D} = {T−τu; τ ∈ D} is relatively compact in BC(R, X) since {Tτu; τ ∈ D}
342
+ is also one. Therefore the set {Tτu; τ ∈ D ∪ (−D)} is relatively compact in BC(R, X).
343
+ Moreover D ∪(−D) is a relatively dense subset of R. According to Haraux’s criterion, we
344
+ have u ∈ AP(R, X).
345
+
346
+ We extend Theorem 2.4, the well-known characterization of asymptotically almost pe-
347
+ riodic functions. For τ ∈ R+ and u ∈ BC(R+, X), we define the translation mapping
348
+ T +
349
+ τ u ∈ BC(R+, X) by T +
350
+ τ u(t) = u(t + τ) for t ≥ 0.
351
+ Corollary 4.3. Let (X, d) be a complete metric space and let D be a relatively dense
352
+ subset of R+. For u ∈ BC(R+, X) the following statements are equivalent.
353
+ i) u ∈ AAP(R+, X).
354
+ ii) The set {T +
355
+ τ u; τ ∈ D} is relatively compact in (BC(R+, X), d∞,+).
356
+ Remark 4.4. To establish implication ii) =⇒ i), by using Theorem 2.4, it suffices to prove
357
+ that assertion ii) implies that {T +
358
+ τ u; τ ≥ 0} is relatively compact in (BC(R+, X), d∞,+).
359
+ The proof of this last implication is an adaptation of those of the Haraux’s criterion. But
360
+ contrary to the proof of implication iii) =⇒ ii) in Corollary 4.1, there are technical issues.
361
+ These technical difficulties come from the fact that when D is a relatively dense subset
362
+ in R+, the sets D and [t − ℓ, t] can be disjoint for some 0 ≤ t ≤ ℓ. For this reason we give
363
+ the complete proof of this implication.
364
+ Proof of Corollary 4.3. i) =⇒ ii). It is a consequence of Theorem 2.4.
365
+ ii) =⇒ i). We will prove that assumption ii) implies {T +
366
+ τ u; τ ≥ 0} is relatively compact
367
+ in BC(R+, X), then we conclude by using Theorem 2.4. The subset D being relatively
368
+ dense in R+, there exists ℓ > 0 such that D ∩ [α, α + ℓ] ̸= ∅ for all α ≥ 0.
369
+ • We prove that u is uniformly continuous on [ℓ, +∞). Let us fix ε > 0. By assump-
370
+ tion the set {T +
371
+ τ u; τ ∈ D} is in particular relatively compact in C([0, 2ℓ], X), then it is
372
+ uniformly equicontinuous on [0, 2ℓ], that is there exists δ > 0 such that
373
+ s1, s2 ∈ [0, 2ℓ], |s1 − s2| ≤ δ =⇒ sup_{τ∈D} d(u(s1 + τ), u(s2 + τ)) ≤ ε.    (4.1)
378
+ Let t1, t2 be two real numbers such that t1, t2 ≥ ℓ and |t1 − t2| ≤ δ. We can assume
382
+ without loss of generality that ℓ ≤ t1 ≤ t2 ≤ t1 + ℓ. We have D ∩ [t1 − ℓ, t1] ̸= ∅ since
383
+ t1 − ℓ ≥ 0, then there exists τ ∈ D such that 0 ≤ t1 − τ ≤ t2 − τ ≤ 2ℓ. Taking into account
384
+ (4.1), we deduce that d(u(t1), u(t2)) = d(u((t1 − τ) + τ), u((t2 − τ) + τ)) ≤ ε. Hence u is
385
+ uniformly continuous on [ℓ, +∞).
386
+ • We prove that {T +
387
+ τ u; τ ≥ ℓ} is relatively compact in BC(R+, X) . Let (tn)n be a
388
+ sequence of real numbers such that tn ≥ ℓ. We have D ∩ [tn − ℓ, tn] ̸= ∅ for each n ∈ N,
389
+ since tn − ℓ ≥ 0, then there exist τn ∈ D and σn ∈ [0, ℓ] such that tn = τn + σn. By
390
+ compactness of the sequences (σn)n in [0, ℓ] and (T +
391
+ τnu)n in BC(R+, X), it follows that
392
+ lim_{n→+∞} σn = σ and lim_{n→+∞} sup_{t≥0} d(u(t + τn), v(t)) = 0 (up to a subsequence).
398
+ From the
399
+ following inequality
400
+ sup_{t≥0} d(u(tn + t), v(σ + t)) ≤ sup_{t≥0} d(u(τn + σn + t), u(τn + σ + t)) + sup_{t≥0} d(u(τn + t), v(t))
407
+ and the uniform continuity of u, we deduce that lim_{n→+∞} sup_{t≥0} d(u(tn + t), v(σ + t)) = 0. Then
411
+ {T +
412
+ τ u; τ ≥ ℓ} is relatively compact in BC(R+, X).
413
+ • We prove that u ∈ AAP(R+, X). The function u is uniformly continuous on R+,
414
+ since u is continuous on [0, ℓ] and uniformly continuous on [ℓ, +∞). Then the map ˆu :
415
+ R+ → BC(R+, X) defined by ˆu(τ) = T +
416
+ τ u for τ ≥ 0 is continuous, consequently the
417
+ set {T +
418
+ τ u; 0 ≤ τ ≤ ℓ} is relatively compact in BC(R+, X). The set {T +
419
+ τ u; τ ≥ 0} is
420
+ relatively compact in BC(R+, X) as the union of two relatively compact sets. According
421
+ to Theorem 2.4, u ∈ AAP(R+, X).
422
+
423
+ Using Corollaries 4.1 and 4.3, we give a direct and simpler proof of the following
+ result, which can be found in [7, 9, 10]. Before that, we recall some definitions on processes.
425
+ A process on a complete metric space (X, d) according to Dafermos [4] is a two pa-
426
+ rameter family U(t, τ) of maps from X into itself defined for (t, τ) ∈ R × R+ and such
427
+ that i) U(t, 0)x = x for all (t, x) ∈ R × X, ii) U(t, σ + τ) = U(t + σ, τ) ◦ U(t, σ) for all
428
+ (t, σ, τ) ∈ R×R+×R+ and iii) the mapping U(t, ·)x ∈ C([0, +∞), X) for all (t, x) ∈ R×X.
429
+ For each x ∈ X, the positive trajectory starting from x is the map U(0, ·)x : R+ → X. A
430
+ function u : R → X is called a complete trajectory if we have u(t + τ) = U(t, τ)u(t) for
431
+ all (t, τ) ∈ R × R+.
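+ Informally, and under well-posedness assumptions which we do not make precise here, the
+ solution operator U(t, τ)x := u(t + τ) of the Cauchy problem u′(s) = F(s, u(s)), u(t) = x,
+ defines such a process, and this process is ω-periodic as soon as F(·, x) is ω-periodic in time.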
432
+ A process U is said to be ω-periodic (ω > 0) if U(t + ω, τ) = U(t, τ) for all (t, τ) ∈ R × R+.
+ A process U is said to be bounded if, for some M ≥ 1, we have for all (τ, x1, x2) ∈ R+ × X × X,
+ d (U(0, τ)x1, U(0, τ)x2) ≤ Md (x1, x2).
435
+ Corollary 4.5. [7, 9], [10, Th´eor`eme 6.4.6, p. 84] Let U be a ω-periodic process on a
436
+ complete metric space (X, d). If U is bounded, then the following statements hold.
437
+ i) If u is a complete trajectory of U such that u(−ωN) is relatively compact, then u is
438
+ almost periodic.
439
+ ii) If u is a positive trajectory of U such that u(ωN) is relatively compact, then u is
440
+ asymptotically almost periodic.
441
+ Proof. i) The process U is ω-periodic, then we have u(nω) = U(−mω, (n+m)ω)u(−mω) =
442
+ 9
446
+ deduce that d (u(nω), u(mω)) ≤ Md (u(−mω), u(−nω)), then u(ωN) is relatively compact
447
+ since u(−ωN) is also one, therefore u(ωZ) is relatively compact. From assumptions on
448
+ the process U, it follows that for all n, m ∈ Z,
449
+ sup_{τ≥0} d (u(τ + nω), u(τ + mω)) ≤ Md (u(nω), u(mω)).    (4.2)
453
+ From Lemma 3.2, {R(Tnωu); n ∈ Z} is relatively compact in (BC(R+, X), d∞,+) since
454
+ u(ωZ) is also one in (X, d). We conclude with Corollary 4.1 by setting D = ωZ.
455
+ ii) For all n, m ∈ N, (4.2) holds on the positive trajectory u, then from Lemma 3.2,
456
+ {T +
457
+ nωu; n ∈ N} is relatively compact in (BC(R+, X), d∞,+) since u(ωN) is also one in
458
+ (X, d). We conclude with Corollary 4.3 by setting D = ωN.
459
+
460
+ 5. Bochner’s criterion in the periodic case
461
+ Periodic functions are a special case of almost periodic functions. Haraux gave a charac-
462
+ terization of periodic functions in terms of Bochner’s criterion which is recalled in Section
463
+ 2.
464
+ This criterion is a direct consequence of [8, Proposition 2].
465
+ Haraux established a
466
+ general result [8, Th´eor`eme 1] implying as a special case a characterization of periodic
467
+ functions and the fact that any compact trajectory of a one-parameter continuous group
468
+ is automatically periodic.
469
+ In this section, we give an extension of this characterization of periodic functions in the
470
+ spirit of the main result of this article. We also treat the asymptotically periodic case.
471
+ Then we apply these results to study the periodicity of solutions of dynamical systems.
472
+ Recall that we denote by R the restriction operator R : BC(R, X) → BC(R+, X)
473
+ defined by R(u)(t) := u(t) for t ≥ 0 and u ∈ BC(R, X).
474
+ Corollary 5.1. Let (X, d) be a complete metric space. For u ∈ BC(R, X) the following
475
+ statements are equivalent.
476
+ i) The function u is ω-periodic (ω > 0).
477
+ ii) The set {Tτu; τ ≥ 0} is a compact set of (BC(R, X), d∞).
478
+ iii) The set {R(Tτu); τ ∈ R} is a compact set of (BC(R+, X), d∞,+).
479
+ Proof. i) =⇒ ii). From assumption, it follows that the function τ → Tτu from R into
480
+ BC(R, X) is continuous and ω-periodic. Then the {Tτu; τ ≥ 0} = {Tτu; 0 ≤ τ ≤ ω} is a
481
+ compact set of (BC(R, X), d∞) as the range of a compact set by a continuous map.
482
+ ii) =⇒ i).
483
+ For τ1, τ2 ∈ R, d∞(Tτ1u, Tτ2u) := sup_{t∈R} d(u(τ1 + t), u(τ2 + t)); replacing t by t − τ1 − τ2
+ in the upper bound, we get d∞(Tτ1u, Tτ2u) = d∞(T−τ1u, T−τ2u).
487
+ Then the set {Tτu; τ ≤ 0} = {T−τu; τ ≥ 0} is
488
+ compact in BC(R, X) since {Tτu; τ ≥ 0} is also one. Therefore the set {Tτu; τ ∈ R} is a
489
+ compact set of BC(R, X) as the union of two compact sets in BC(R, X). According to
490
+ Theorem 2.3, u is periodic.
491
+ i) =⇒ iii). It is obvious by using Theorem 2.3 and the continuity of the restriction
492
+ operator R.
493
+ iii) =⇒ i). By using Theorem 2.3, we have to prove that K := {Tτu; τ ∈ R} is a
494
+ compact set of (BC(R, X), d∞). As a consequence of Theorem 3.1 and Bochner’s criterion,
495
+ the set K is relatively compact in (BC(R, X), d∞) and the function u is almost periodic.
496
+ It remains to prove that K is closed in (BC(R, X), d∞). Let (τn)n be a sequence of real
500
+ numbers such that lim_{n→+∞} d∞(Tτnu, v) = 0. Let us prove that v = Tτu for some τ ∈ R. By
+ continuity of the operator R, we have lim_{n→+∞} d∞,+(R(Tτnu), R(v)) = 0. By assumption, the
505
+ set {R(Tτu); τ ∈ R} is in particular closed in (BC(R+, X), d∞,+), then R(v) = R(Tτu)
506
+ for some τ ∈ R, that is
507
+ ∀t ≥ 0,
508
+ v(t) = Tτu(t).
509
+ (5.1)
510
+ We have to prove that (5.1) holds on the whole real line. The function Tτu is almost
511
+ periodic as translation of an almost periodic function and v is also one as uniform limit
512
+ on R of almost periodic functions. Let us denote by φ : R → R the function defined by
513
+ φ(t) := d(Tτu(t), v(t)). The function φ is almost periodic [12, Property 4, p. 3 & 7, p.6].
514
+ An almost periodic function is uniformly recurrent, then there exists a sequence of real
515
+ numbers (τn)n such that lim_{n→+∞} τn = +∞ and lim_{n→+∞} φ(t + τn) = φ(t) for all t ∈ R. From (5.1), it
+ follows that φ(t) = 0 for all t ≥ 0, so we deduce that φ(t) = lim_{n→+∞} φ(t + τn) = 0 for all t ∈ R.
523
+ Then v(t) = Tτu(t) for all t ∈ R. This ends the proof.
524
+
525
+ According to Theorem 2.4, if the set {T +
526
+ τ u; τ ≥ 0} is relatively compact in BC(R+, X),
527
+ then the function u is asymptotically almost periodic. We now give an answer to the
528
+ question what can be said about the function u when {T +
529
+ τ u; τ ≥ 0} is a compact set of
530
+ BC(R+, X). For u ∈ BC(R+, X), we say that u is ω-periodic (ω > 0) on [t0, +∞) for
531
+ some t0 ≥ 0 if u(t + ω) = u(t) for all t ≥ t0.
532
+ Corollary 5.2. Let (X, d) be a complete metric space. For u ∈ BC(R+, X) the following
533
+ statements are equivalent.
534
+ i) There exists t0 ≥ 0 such that u is ω-periodic on [t0, +∞).
535
+ ii) The set {T +
536
+ τ u; τ ≥ 0} is a compact set of (BC(R+, X), d∞,+).
537
+ Remark 5.3. Let u be a function which satisfies condition i) of Corollary 5.2.
538
+ i) Let us denote by v ∈ C(R, X) the ω-periodic function satisfying u(t) = v(t) for
539
+ t ≥ t0. Such a function v exists and is unique: v is defined by v(t) = u(t − [(t − t0)/ω]ω), where
+ [(t − t0)/ω] denotes the integer part of (t − t0)/ω.
544
+ ii) The function u is a special case of an asymptotically almost periodic function where the
545
+ almost periodic function v is periodic and d(u(t), v(t)) = 0 for t ≥ t0.
546
+ Proof of Corollary 5.2. i) =⇒ ii). Let us denote by v the function defined in Remark 5.3.
547
+ By Corollary 5.1 and the periodicity of v, we have {R(Tτv); τ ≥ t0} = {R(Tτv); τ ∈ R} is
548
+ a compact set of (BC(R+, X), d∞,+).
549
+ First, we have T +
550
+ τ u = R(Tτv) for τ ≥ t0, then {T +
551
+ τ u; τ ≥ t0} is a compact set.
552
+ Second, the function u is uniformly continuous on R+, then the function from R+ to
553
+ BC(R+, X) defined by τ → T +
554
+ τ u is continuous.
555
+ Then the set {T +
556
+ τ u; 0 ≤ τ ≤ t0} is
557
+ compact.
558
+ Therefore the set {T +
559
+ τ u; τ ≥ 0} is a compact set of BC(R+, X) as the union of two
560
+ compact set.
561
+ ii) =⇒ i). As consequence of Theorem 2.4, the function u is asymptotically almost
562
+ periodic, that is lim
563
+ t→∞ d(u(t), v(t)) = 0 for some v ∈ AP(R, X).
564
+ An almost periodic
565
+ function is uniformly recurrent, then there exists a sequence of real numbers (tn)n such
569
+ that lim_{n→+∞} tn = +∞ and lim_{n→+∞} v(t + tn) = v(t) for all t ∈ R. We deduce that
574
+ ∀t ∈ R,    lim_{n→+∞} u(t + tn) = v(t).    (5.2)
578
+ First we prove that v is periodic. For t ∈ R, τ1, τ2 ≥ 0, we have for n large enough
+ d(u(t + tn + τ1), u(t + tn + τ2)) ≤ sup_{s≥0} d(u(s + τ1), u(s + τ2)). From (5.2), it follows that
+ sup_{t∈R} d(v(t + τ1), v(t + τ2)) ≤ sup_{s≥0} d(u(s + τ1), u(s + τ2)) for each τ1 and τ2 ≥ 0. According to
587
+ Lemma 3.2, {Tτv ; τ ≥ 0} is a compact set of (BC(R, X), d∞) since {T +
588
+ τ u ; τ ≥ 0} is also
589
+ one in (BC(R+, X), d∞,+). As consequence of Corollary 5.1, the function v is periodic.
590
+ Second we prove that: ∃t0 ≥ 0 such that ∀t ≥ 0, v(t) = u(t + t0). By compactness of
591
+ {T +
592
+ τ u; τ ≥ 0}, there exists a subsequence (T +
593
+ tφ(n)u)n such that lim
594
+ n→+∞ d∞,+(T +
595
+ tφ(n)u, T +
596
+ t0 u) = 0
597
+ for some t0 ≥ 0. From (5.2) we deduce that R(v) = T +
598
+ t0 u, that is v(t) = u(t + t0) for all
599
+ t ≥ 0.
600
+ Then u(t) = v(t − t0) for each t ≥ t0 where the function v(· − t0) is periodic on R.
601
+
602
+ Now we give an example of application of Corollaries 5.1 and 5.2 to dynamical systems.
+ For the definition of a dynamical system, see above Corollary 3.3 in
604
+ Section 3.
605
+ Corollary 5.4. Let (S(t))t≥0 be a dynamical system on a complete metric space (X, d).
606
+ i) If u is a positive trajectory, then u is periodic on [t0, +∞) for some t0 ≥ 0 if and
607
+ only if u(R+) is a compact set and (S(t))t≥0 is equicontinuous on u(R+).
608
+ ii) If u is a complete trajectory, then u is periodic if and only if u(R) is a compact set
609
+ and (S(t))t≥0 is equicontinuous on u(R).
610
+ iii) There exists a complete trajectory which is periodic if and only if there exists a
611
+ positive trajectory u such that u(R+) is a compact set and (S(t))t≥0 is equicontinuous on
612
+ u(R+).
613
+ Remark 5.5. Thus under the assumption of equicontinuity, a complete trajectory of a
614
+ dynamical system with a compact range is necessarily periodic, although there are almost
615
+ periodic functions with a compact range, which are not periodic. An example of such
616
+ function is given by Haraux in [8].
617
+ Proof of Corollary 5.4. i) Remark that if u is a positive trajectory which is periodic on
618
+ [t0, +∞) for some t0 ≥ 0, then first u(R+) is compact and second u ∈ AAP(R+, X)
619
+ (see Remark 5.3). As consequence of Corollary 3.5, the set (S(t))t≥0 is equicontinuous on
620
+ u(R+). Reciprocally assume the positive trajectory u is such that (S(t))t≥0 is equicontinu-
621
+ ous on the compact set u(R+). It remains to prove that the positive trajectory u is periodic
622
+ on [t0, +∞) for some t0 ≥ 0. For each x ∈ u(R+), the map S(·)x is continuous and satisfies
623
+ S(t)x ∈ u(R+) for each t ≥ 0. Then the map S(·)x is bounded and continuous, so the map
624
+ Φ : u(R+) → BC(R+, X) with Φ(x) = S(·)x is well-defined. The continuity of Φ results
625
+ of the equicontinuity of (S(t))t≥0 on u(R+). Then the set Φ(u(R+)) = {Φ(u(τ)) ; τ ≥ 0}
626
+ is a compact of BC(R+, X). Moreover Φ(u(τ))(t) = S(t)u(τ) = u(t + τ) for t and τ ≥ 0,
627
+ then Φ(u(τ)) = T +
631
+ τ u, so {T +
632
+ τ u ; τ ≥ 0} is a compact set of BC(R+, X). According to
633
+ Corollary 5.2, the function u is periodic on [t0, +∞) for some t0 ≥ 0.
634
+ ii) The proof of ii) is similar to that of i) by using Corollary 3.3 instead of 3.5, Corollary
635
+ 5.1 instead of Corollary 5.2 and by replacing the map Φ : u(R+) → BC(R+, X) with
636
+ Φ(x) = S(·)x by the map Φ : u(R) → BC(R+, X). This permits to prove that the set
637
+ {Φ(u(τ)) = R(Tτu); τ ∈ R} is a compact set of (BC(R+, X), d∞,+).
638
+ iii) If v is a complete trajectory which is periodic, then v(R) is compact and according
639
+ to ii), (S(t))t≥0 is equicontinuous on v(R). So the restriction u of v on R+ is a posi-
640
+ tive trajectory such that u(R+) = v(R+) = v(R) since v is periodic, then (S(t))t≥0 is
641
+ equicontinuous on the compact set u(R+). Reciprocally, assume that u is a positive tra-
642
+ jectory such that (S(t))t≥0 is equicontinuous on the compact set u(R+). According to
643
+ i), u is ω-periodic on [t0, +∞) for some t0 ≥ 0. Let us denote by v the function defined
644
+ in Remark 5.3. For t ≥ s, there exists n0 ∈ N such that s + n0ω ≥ t0. The function
645
+ v is ω-periodic and u is a positive trajectory satisfying u(τ) = v(τ) for τ ≥ t0, then
646
+ v(t) = v(t + n0ω) = u(t + n0ω) = S(t − s)u(s + n0ω) = S(t − s)v(s + n0ω) = S(t − s)v(s)
+ for all t ≥ s. Then v is a periodic complete trajectory.
648
+
649
+ Remark 5.6. Under i) of Corollary 5.4, one can have t0 > 0, that is the positive trajectory
650
+ u is not the restriction of a periodic complete trajectory.
651
+ For example, consider the
652
+ bounded dynamical system (S(t))t≥0 on L1(0, 1) defined by
653
+ (S(t)x)(s) =
654
+
655
+
656
+
657
+ x(s − t)
658
+ if
659
+ t < s < 1
660
+ 0
661
+ if
662
+ 0 < s < t
663
+ for x ∈ L1(0, 1) and 0 < t < 1. For t ≥ 1, we set S(t) = 0. Then all positive trajectories
664
+ have a compact range and the alone complete trajectory is the null function. Thus all
665
+ positive trajectories are not the restriction of a periodic complete trajectory except the
666
+ null function.
667
+ Not all dynamical systems have this pathology, some systems are such that if two
668
+ positive trajectories have the same value at the same time, then they are equal. If we
669
+ consider such systems, we get more refined results from Corollary 5.4.
670
+ A dynamical system (S(t))t≥0 has the backward uniqueness property if any two positive
671
+ trajectories having the same value at t = t0 ≥ 0 coincide for any other t ≥ 0. This
672
+ property is equivalent to S(t) ∈ C(X, X) is injective for each t ≥ 0. We say that a
673
+ positive trajectory u is extendable to a periodic complete trajectory, if there exists a periodic
674
+ complete trajectory such that its restriction on R+ is u.
675
+ Corollary 5.7. Let (S(t))t≥0 be a dynamical system on a complete metric space (X, d).
676
+ Assume that (S(t))t≥0 has the backward uniqueness property.
677
+ i) If u is a positive trajectory, then u is periodic on R+ if and only if u(R+) is a
678
+ compact set and (S(t))t≥0 is equicontinuous on u(R+). In this case the positive trajectory
679
+ u is extendable to a periodic complete trajectory v.
680
+ ii) If v is a complete trajectory, then v is periodic if and only if v(R+) is a compact set
681
+ and (S(t))t≥0 is equicontinuous on v(R+).
682
+ Proof. i) The direct implication results from i) of Corollary 5.4. For the reciprocal implica-
686
+ tion we use i) of Corollary 5.4. Then the positive trajectory u is ω-periodic on [t0, +∞) for
687
+ some t0 ≥ 0. Let us denote by v ∈ C(R, X) the ω-periodic function satisfying u(t) = v(t)
688
+ for t ≥ t0 (see Remark 5.3). The restriction of v on R+ and u are two positive trajectories
689
+ having the same value at t = t0 (t0 ≥ 0). From the backward uniqueness property, we
690
+ have u(t) = v(t) for t ≥ 0, then u is periodic on R+. By construction, v is periodic and, as in the
691
+ proof of iii) of Corollary 5.4, we deduce that v is a complete trajectory.
692
+ ii) The direct implication results from ii) of Corollary 5.4, since v(R+) = v(R). For the
693
+ reciprocal implication, we consider v a complete trajectory such that v(R+) is a compact
694
+ set and (S(t))t≥0 is equicontinuous on v(R+). Then the restriction u of the complete
695
+ trajectory v on R+ is a positive trajectory such that u(R+) is compact and (S(t))t≥0
696
+ is equicontinuous on u(R+). According to i), u is ω-periodic on R+. Let us denote by
697
+ w ∈ C(R, X) the ω-periodic function satisfying u(t) = w(t) for t ≥ 0. As in the proof of iii) of
698
+ Corollary 5.4, we deduce that w is a complete trajectory. Fix T > 0. The two maps ˜v
699
+ and ˜w : R+ → X defined by ˜v = v(· − T) and ˜w = w(· − T) are two positive trajectories
700
+ having the same value at t = T. From the backward uniqueness property, we have ˜v = ˜w,
701
+ that is v(t) = w(t) for t ≥ −T. Since T is arbitrary, then v(t) = w(t) for each t ∈ R
702
+ where w is a periodic complete trajectory.
703
+ This proves that v is a periodic complete
704
+ trajectory.
705
+
706
+ References
707
+ [1] L. Amerio, G. Prouse, Almost periodic functions and functional equations, Van Nostrand Reinhold
708
+ Comp., New York, 1971.
709
+ [2] S. Bochner, A new approach to almost periodicity, Proc. Natl. Acad. Sci. USA 48 (1962) 2039-2043.
710
+ [3] S. Bochner, Continuous mappings of almost automorphic and almost periodic functions, Proc. Natl.
711
+ Acad. Sci. USA 52 (1964) 907-910.
712
+ [4] C. M. Dafermos, Almost periodic processes and almost periodic solutions of evolution equations. Dy-
713
+ namical systems (Proc. Internat. Sympos., Univ. Florida, Gainesville, Fla., 1976), pp. 43-57. Academic
714
+ Press, New York, 1977.
715
+ [5] A. M. Fink, Almost periodic differential equations, Lecture Notes in Math., vol. 377, Springer-Verlag,
716
+ Berlin-New York, 1974.
717
+ [6] M. Fr´echet, Les fonctions asymptotiquement presque-p´eriodiques continues, C.R. Math. Acad. Sci.
718
+ Paris 213 (1941), pp. 520-522 (in French).
719
+ [7] A. Haraux, Asymptotic behavior of trajectories for some nonautonomous, almost periodic processes,
720
+ J. Differential Equations 49 (1983), 473-483.
721
+ [8] A. Haraux, Sur les trajectoires compactes de syst`emes dynamiques autonomes. [On the compact tra-
722
+ jectories of autonomous dynamical systems], Portugal. Math. 44 (1987), 253-259 (in French).
723
+ [9] A. Haraux, A simple almost-periodicity criterion and applications, J. Differential Equations 66 (1987),
724
+ 51-61.
725
+ [10] A. Haraux, Syst`emes dynamiques dissipatifs et applications. [Dissipative dynamical systems and ap-
726
+ plications], Recherches en Math´ematiques Appliqu´ees [Research in Applied Mathematics], 17, Masson,
727
+ Paris, 1991 (in French).
728
+ [11] S. Lang Real and functional analysis, Third edition, Springer-Verlag, New York, 1993.
729
+ [12] B. M. Levitan, V. V. Zhikov, Almost periodic functions and differential equations,Translated from
730
+ the Russian by L. W. Longdon. Cambridge University Press, Cambridge-New York, 1982.
731
+ [13] V. Nemytskii and V. Stepanov, Qualitative theory of differential equations, Princeton University
732
+ Press, Princeton, New Jersey, 1960.
733
+ [14] W. M. Ruess, W. H. Summers, Asymptotic almost periodicity and motions of semigroups of operators,
734
+ Proceedings of the symposium on operator theory (Athens, 1985). Linear Algebra Appl. 84 (1986),
735
+ 335-351.
736
+ [15] W. M. Ruess, W. H. Summers, Minimal sets of almost periodic motions Math. Ann. 276 (1986),
740
+ 145-158.
741
+ [16] W. M. Ruess, W. H. Summers, Compactness in spaces of vector valued continuous functions and
742
+ asymptotic almost periodicity, Math. Nachr. 135 (1988), 7-33.
743
+ [17] T. Yoshizawa, Stability theory and the existence of periodic solutions and almost periodic solutions,
744
+ Springer, New-york, 1975.
745
+ [18] S. Zaidman, Almost-periodic functions in abstract spaces, Research Notes in Mathematics, 126,
746
+ Boston, 1985.
747
+ [19] S. Zaidman, On relatively compact trajectories of semigroups of class C0, Applicable Anal. 21 (1986),
748
+ 9-12.
749
+
0dAyT4oBgHgl3EQfbfdc/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
0tE3T4oBgHgl3EQfnAp3/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf83ead0f1de17449f4a18a26962adcee156575239f06c6df09b49d5510e0eae
3
+ size 142865
19A0T4oBgHgl3EQfMv_l/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc9e29b5d77a5b7f0546ad62980cefbb9c9728b083a9da457da0d94c2d725a84
3
+ size 4653101
1dAzT4oBgHgl3EQfRfuL/content/tmp_files/2301.01217v1.pdf.txt ADDED
@@ -0,0 +1,1312 @@
1
+ Unlearnable Clusters: Towards Label-agnostic Unlearnable Examples
2
+ Jiaming Zhang1 Xingjun Ma2* Qi Yi1 Jitao Sang1,4∗ Yugang Jiang2 Yaowei Wang4 Changsheng Xu3,4
3
+ 1Beijing Jiaotong University 2Fudan University 3Chinese Academy of Sciences 4Peng Cheng Lab
4
+ Abstract
5
+ There is a growing interest in developing unlearnable
6
+ examples (UEs) against visual privacy leaks on the Internet.
7
+ UEs are training samples added with invisible but unlearn-
8
+ able noise, which have been found can prevent unauthorized
9
+ training of machine learning models. UEs typically are gen-
10
+ erated via a bilevel optimization framework with a surrogate
11
+ model to remove (minimize) errors from the original samples,
12
+ and then applied to protect the data against unknown target
13
+ models. However, existing UE generation methods all rely
14
+ on an ideal assumption called label-consistency, where the
15
+ hackers and protectors are assumed to hold the same label
16
+ for a given sample. In this work, we propose and promote
17
+ a more practical label-agnostic setting, where the hackers
18
+ may exploit the protected data quite differently from the
19
+ protectors. E.g., a m-class unlearnable dataset held by the
20
+ protector may be exploited by the hacker as a n-class dataset.
21
+ Existing UE generation methods are rendered ineffective in
22
+ this challenging setting. To tackle this challenge, we present
23
+ a novel technique called Unlearnable Clusters (UCs) to gen-
24
+ erate label-agnostic unlearnable examples with cluster-wise
25
+ perturbations. Furthermore, we propose to leverage Vision-
26
+ and-Language Pre-trained Models (VLPMs) like CLIP as the
27
+ surrogate model to improve the transferability of the crafted
28
+ UCs to diverse domains. We empirically verify the effective-
29
+ ness of our proposed approach under a variety of settings
30
+ with different datasets, target models, and even commercial
31
+ platforms Microsoft Azure and Baidu PaddlePaddle.
32
+ 1. Introduction
33
+ While the huge amount of “free” data available on the
34
+ Internet has been key to the success of deep learning and
35
+ computer vision, this has also raised public concerns on the
36
+ unauthorized exploitation of personal data uploaded to the
37
+ Internet to train commercial or even malicious models [16].
38
+ For example, a company named Clearview AI has been
39
+ found to have scraped billions of personal images from Face-
40
+ book, YouTube, Venmo and millions of other websites to
41
+ construct a commercial facial recognition application [44].
42
+ *Corresponding authors
43
+ [Figure 1 graphic: a protector publishes unlearnable images generated with an m-class surrogate
+ model; under label-consistency the hacker trains an m-class target model on them, while under
+ label-agnostic the hacker trains an n-class target model (e.g., animal, vehicle) on the same data.]
+ Figure 1. An illustration of two different data protection assump-
71
+ tions: label-consistency vs. label-agnostic, where the hacker ex-
72
+ ploits the protected data in different manners.
73
+ This has motivated the proposal of Unlearnable Examples
74
+ (UEs) [17] to make data unlearnable (or unusable) to ma-
75
+ chine learning models/services. Similar techniques are also
76
+ known as availability attacks [2,41] or indiscriminate poi-
77
+ soning attacks [14] in the literature. These techniques allow
78
+ users to actively adding protective noise into their private
79
+ data to avoid unauthorized exploitation, rather than putting
80
+ our trust into the hands of large corporations.
81
+ The original UE generation method generates error-
82
+ minimizing noise via a bilevel min-min optimization frame-
83
+ work with a surrogate model [17]. The noise can then be
84
+ added to samples in a training set in either a sample-wise
85
+ or class-wise manner to make the entire dataset unlearnable
86
+ to different DNNs. It has been found that this method can-
87
+ not survive adversarial training, which has been addressed
88
+ by a recent method [11]. In this work, we identify one
89
+ common assumption made by existing UE methods: label-
90
+ consistency, where the hackers will exploit the protected
91
+ dataset in the same way as the protector including the labels.
92
+ This means that, for the same image, the hacker and protec-
93
+ tor hold the same label. We argue that this assumption is
94
+ too ideal, and it is possible that the hackers will collect the
95
+ protected (unlearnable) samples into a dataset for a different
96
+ task and label the dataset into different number of classes.
97
+ As illustrated in Figure 1, an image can be labelled with
98
+ different annotated labels (cat or animal), showing that an m-
102
+ class (e.g., 10-class) unlearnable dataset may be exploited
103
+ by the hacker as a n-class (e.g., 5-class or 20-class) dataset
104
+ depending on its actual needs. We term this more generic
105
+ assumption as label-agnostic and propose a novel method
106
+ Unlearnable Clusters (UCs) to generate more effective and
107
+ transferable unlearnable examples under this harsh setting.
108
+ In Figure 2 (a), we show that this more generic label-
109
+ agnostic setting poses a unique transferability challenge
110
+ for the noise generated by existing methods like Error-
111
+ Minimizing Noise (EMinN) [17], Adversarial Poisoning
112
+ (AdvPoison) [10], Synthetic Perturbations (SynPer) [41] and
113
+ DeepConfuse [9]. This indicates that the protective noise
114
+ generated by these methods are label-dependent and are ren-
115
+ dered ineffective when presented with different number of
116
+ classes. As such, we need more fundamental approaches
117
+ to make a dataset unlearnable regardless of the annotations.
118
+ To this end, we start by analyzing the working mechanism
119
+ of UEs generated by EMinN, AdvPoison as they are very
120
+ representative under the label-consistency setting. Through
121
+ a set of visual analyses, we find that the main reason why
122
+ they could break supervised learners is that the generated
123
+ noise tends to disrupts the distributional uniformity and dis-
124
+ crepancy in the deep representation space. Uniformity refers
125
+ to the property that the manifold of UEs in the deep rep-
126
+ resentation space does not deviate much from that of the
127
+ clean examples, while discrepancy refers to the property
128
+ that examples belonging to the same class are richly diverse
129
+ in the representation space. Inspired by the above obser-
130
+ vation, we propose a novel approach called Unlearnable
131
+ Clusters (UCs) to generate label-agnostic UEs using cluster-
132
+ wise (rather than class-wise) perturbations. This allows us
133
+ to achieve a simultaneous disruption of the uniformity and
134
+ discrepancy without knowing the label information.
135
+ Arguably, the choice of a proper surrogate model also
136
+ plays an important role in generating effective UEs. Previ-
137
+ ous methods generate UEs by directly attacking a surrogate
138
+ model and then transfer the generated UEs to fight against
139
+ a diverse set of target models [10,17]. This may be easily
140
+ achievable under the label-consistency setting, but may fail
141
+ badly under the label-agnostic setting. However, even un-
142
+ der the label-consistency setting, few works have studied
143
+ the impact of the surrogate model to the final unlearnable
144
+ performance. To generate effective, and more importantly,
145
+ transferable UEs under the label-agnostic setting, we need
146
+ to explore more generic surrogate model selection strategies,
147
+ especially those that can be tailored to a wider range of un-
148
+ known target models. Intuitively, the surrogate model should
149
+ be a classification DNN that contains as many classes as
150
+ possible so as to facilitate the recognition and protection of
151
+ billions of images on the Internet. In this paper, we propose
152
+ to leverage the large-scale Vision-and-Language Pre-trained
153
+ Models (VLPMs) [22,23,30] like CLIP [30] as the surrogate
154
+ model. Pre-trained on over 400 million text-to-image pairs,
155
+ CLIP has the power to extract the representation of extremely
156
+ diverse semantics. Meanwhile, VLPMs are pre-trained with
157
+ a textual description rather than a one-hot label to align with
158
+ the image, making them less overfit to the actual class “la-
159
+ bels”. In this work, we leverage the image encoder of CLIP
160
+ to extract the embeddings of the input images and then use
161
+ the embeddings to generate more transferable UCs.
162
+ We evaluate our UC approach with different backbones
163
+ and datasets, all in a black-box setting (the protector does
164
+ not know the attacker’s network architecture or the class
165
+ labels). Cluster-wise unlearnable noise can also prevent un-
166
+ supervised exploitation against contrastive learning to certain
167
+ extent, proving its superiority to existing UEs. We also com-
168
+ pare UC with existing UE methods against two commercial
169
+ machine learning platforms: Microsoft Azure1 and Baidu
170
+ PaddlePaddle2. To the best of our knowledge, this is the
171
+ first physical-world attack to commercial APIs in this line of
172
+ work.
173
+ Our main contributions are summarized as follows:
174
+ • We promote a more generic data protection assumption
175
+ called label-agnostic, which allows the hackers to ex-
176
+ ploit the protected dataset differently (in terms of the
177
+ annotated class labels) as the protector. This opens up
178
+ a more practical and challenging setting against unau-
179
+ thorized training of machine learning models.
180
+ • We reveal the working mechanism of existing UE gener-
181
+ ation methods: they all disrupt the distributional unifor-
182
+ mity and discrepancy in the deep representation space.
183
+ • We propose a novel approach called Unlearnable Clus-
184
+ ters (UCs) to generate label-agnostic UEs with cluster-
185
+ wise perturbations without knowing the label informa-
186
+ tion. We also leverage VLPMs like CLIP as the surro-
187
+ gate model to craft more transferable UCs.
188
+ • We empirically verify the effectiveness of our proposed
189
+ approach with different backbones on different datasets.
190
+ We also show its effectiveness in protecting private data
191
+ against commercial machine learning platforms Azure
192
+ and PaddlePaddle.
193
+ 2. Related Work
194
+ Unlearnable examples (UEs) can be viewed as one special
195
+ type of data poisoning attacks [1,2] that aim to make model
196
+ training fail completely on the poisoned (protected) dataset.
197
+ UEs should be differentiated from the other two well-known
198
+ attacks to deep learning models: backdoor attacks [5, 13,
199
+ 24] and adversarial attacks [12, 37]. Backdoor attacks are
200
+ the other special type of data poisoning attacks that do not
201
+ 1https://portal.azure.com/
202
+ 2https://www.paddlepaddle.org.cn/en/
203
+ 2
204
+
205
+ impact the model’s performance on clean data, which is in
206
+ sharp contrast to UEs. Adversarial attacks are one type of
207
+ test-time attacks that evade the model’s prediction by adding
208
+ small imperceptible adversarial noise to the inputs.
209
+ UEs can be generated via a min-min bilevel optimiza-
210
+ tion framework with a surrogate model [17], similar to
211
+ the generation of strong data poisons via bilevel optimiza-
212
+ tion [18, 34, 36, 45]. The generated noise is termed Error-
213
+ Minimizing Noise (EMinN) as it progressively eliminates
214
+ errors from the training data to trick the target model to be-
215
+ lieve there is nothing to learn [17]. We use EMinN to denote
216
+ the original UE generation method. In addition to EMinN,
217
+ there are also UE generation methods that utilize adversarial
218
+ noise, such as Error-Maximizing Noise (EMaxN) [19], Deep-
219
+ Confuse [9] and Adversarial Poisoning (AdvPoison) [10].
220
+ Recently, Yu et al. [41] unveil a linear-separability property
221
+ of unlearnable noise and propose the Synthetic Perturbations
222
+ (SynPer) method to directly synthesize linearly-separable
223
+ perturbations as effective unlearnable noise.
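+ As a rough sketch of this bilevel formulation (paraphrased; the notation and details differ from
+ the exact objective in [17]): the protector alternately solves min_θ Σ_i min_{∥δ_i∥∞ ≤ ε} L(f_θ(x_i + δ_i), y_i),
+ updating the surrogate f_θ and the sample-wise (or class-wise) noise δ_i, so that the training error
+ is driven towards zero and the protected data appears to contain nothing left to learn.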
224
+ The original UE method EMinN has a few limitations.
225
+ First, the generated unlearnable noise can be removed to a
226
+ large extent by adversarial training [26], although this will
227
+ also decrease the model’s performance by a considerable
228
+ amount [17]. This was later on solved by a recent work
229
+ published at ICLR 2022 [11]. The idea is to optimize the
230
+ adversarial training loss in place of the standard training loss
231
+ to produce more robust error-minimizing noise. The other
232
+ limitation is its transferability to different training schemes,
233
+ target models (the models to protect against) or datasets.
234
+ For example, it has been found that unlearnable noise gen-
235
+ erated in a supervised manner fails to protect the dataset
236
+ from unsupervised contrastive learning [14]. A unsupervised
237
+ UE generation method was then proposed to craft UEs un-
238
+ learnable to unsupervised contrastive learning. However, a
239
+ very recent work by Ren et al. [32] demonstrates that, sur-
240
+ prisingly, unsupervised UEs cannot protect the dataset from
241
+ supervised exploitation. All above UE methods all rely on
242
+ the ideal label-consistency assumption, i.e., the same (or
243
+ no) labels for the protected data will be used by both the
244
+ protectors and hackers. In this paper, we promote a more
245
+ practical label-agnostic setting where different labels could
246
+ be used by the hackers for their own purposes.
247
+ Besides UEs, strong adversarial attacks have also been
248
+ proposed to protect personal data from malicious face recog-
249
+ nition systems, such as LowKey [6] and APF [44]. They
250
+ differ from UEs by making a normally trained model unable
251
+ to recognize the protected images, rather than preventing
252
+ the proper training of any machine learning models on the
253
+ protected images. In this work, we focus on UEs rather than
254
+ other data protection techniques which we believe are of
255
+ independent interest.
256
+ 3. Proposed Method
257
+ Threat Model.
258
+ We introduce two parties: the protector
259
+ and the hacker. The protectors leverage a surrogate model
260
+ to generate UEs for its private data before publishing it on
261
+ the Internet. For example, online social network companies
262
+ (or users) could convert their photos to their UE versions be-
263
+ fore posting them online. These “protected” images are then
264
+ collected, without the protectors’ consent, by a hacker into a
265
+ dataset to train a commercial or malicious model. The pro-
266
+ tectors’ goal is to make the collected dataset unlearnable, i.e.,
267
+ cannot be used for model training, while the hackers’ goal
268
+ is to train accurate models on the unlearnable (protected)
269
+ dataset. Following prior works [11,17,25], we assume the
270
+ released dataset is 100% protected, i.e., all the samples are
271
+ perturbed to be unlearnable. While this assumption appears
272
+ to be ideal, if the protection technique is reliable, there is no
273
+ reason not to employ it to gain more protection and privacy.
274
+ Therefore, in this work we choose to focus on the unlearn-
275
+ able technique itself rather than changing the setting of the
276
+ protectors. Following our label-agnostic setting, we also as-
277
+ sume the hackers could exploit the unlearnable dataset with
278
+ different labels. E.g., an m-class dataset could be exploited
+ by the hacker as an n-class dataset.
280
+ Here, we give an example of such a label-agnostic scenario
+ with an online social media company that strives to protect
282
+ the contents created by all of its users. The company could
283
+ leverage unlearnable techniques to develop systematic pro-
284
+ tection scheme against unauthorized data explorers. In this
285
+ case, we can assume all the images uploaded by the users are
286
+ protected (by the company). Potential hackers like Clearview
287
+ AI may crawl the images from the online platform without
288
+ the users’ consent into one or a set of datasets for its own
289
+ purposes. Thus, the collected datasets cannot be guaranteed
290
+ to have the same labels as their original versions. The pro-
291
+ tector thus needs to craft more powerful and transferable
292
+ unlearnable examples to make data unexploitable against
293
+ different labeling strategies.
294
+ 3.1. Problem Formulation
295
+ We focus on image classification tasks in this paper.
296
+ Given a clean m-class training dataset D^m_c = {(x_i, y_i)}^k_{i=1} consisting of k clean training
+ images x ∈ X ⊂ R^d and their labels y ∈ Y, in a standard unlearnable setting [17], the
+ protector trains an m-class surrogate model f^m_s on D^m_c. The protector can then generate an
+ unlearnable version of the dataset as D^m_u = {(x′_i, y′_i)}^k_{i=1}, based on the clean dataset
+ D^m_c and the surrogate model f^m_s. The unlearnable images are denoted as x′ = x + δ (x ∈ D^m_c),
+ with the same labels y ∈ Y as their original versions, and δ ∈ ∆ ⊂ R^d is the generated
+ unlearnable noise, which is often regularized to be imperceptible. The unlearnable dataset D^m_u
+ is assumed to be the dataset collected by the hackers, and will be exploited to train a
+ commercial or malicious m-class target model f^m_t.
323
+
324
+ [Figure 2 (a): bar chart of test accuracy (%) for EMinN, AdvPoison, SynPer and DeepConfuse,
+ label-consistency values 19.93, 6.25, 13.54, 28.77 vs. label-agnostic values 93.77, 93.6, 93, 93.31.
+ Figure 2 (b): 3D feature visualizations for Clean, EMinN and AdvPoison; axis ticks omitted.]
349
+ Figure 2. (a) Current UE methods become ineffective in the label-
350
+ agnostic setting, even though they exhibit high effectiveness in the
351
+ label-consistency setting (under noise constraint ϵ = 8/255). (b)
352
+ A 3D feature visualization of clean CIFAR-10 examples and the
353
+ UEs derived by EMinN and AdvPoison. Points in the same color
354
+ denote samples of the same class.
355
+ without the protectors’ consent.
356
+ Label-consistency vs. Label-agnostic.
357
+ The above formu-
358
+ lation follows the standard label-consistency assumption of
359
+ previous works [11,17], where the hackers collect, annotate
360
+ and exploit the unlearnable dataset Dm
361
+ u exactly the same
362
+ as it was initially released by the protectors. Under a more
363
+ general and practical label-agnostic assumption, the hackers
364
+ could annotate the collected dataset Dm
365
+ u differently, e.g., as-
366
+ signing it with different number of classes. In this case, the
367
+ hackers may exploit the dataset as an n-class (n ̸= m) classification dataset
+ D^n_c = {(x′_i, y′_i)}^k_{i=1} to train an n-class target model f^n_t. Note that the protectors
+ have no knowledge of the target class number n nor the target labels y′_i. Arguably, the
377
+ hackers may even exploit the dataset as an object detection
378
+ dataset rather than a classification dataset. We will explore
379
+ such a more challenging task-agnostic assumption in our
380
+ future work and focus on the label-agnostic in this work.
381
+ 3.2. The Label-agnostic Challenge
382
+ Existing methods are not robust to label-agnostic ex-
383
+ ploitation.
384
+ We test the effectiveness of existing unlearn-
385
+ able methods developed under the label-consistency set-
386
+ ting against label-agnostic hackers. Here we consider cur-
387
+ rent unlearnable method including Error-Minimizing Noise
388
+ (EMinN) [17], Adversarial Poisoning (AdvPoison) [10], Syn-
389
+ thetic Perturbations (SynPer) [41] and DeepConfuse [9], on
390
+ the CIFAR-10 dataset [21]. The ResNet-18 [15] models are
391
+ used for both the surrogate and target models. As shown
392
+ in Figure 2 (a), these methods are extremely effective in
393
+ preventing the training of machine learning models on the
394
+ unlearnable dataset with the same labels. However, if the un-
395
+ learnable dataset is crafted using ImageNet surrogate model
396
+ with the predicted ImageNet labels (i.e., labels predicted by
397
+ the surrogate model), it fails to prevent the model training
398
+ with the original CIFAR-10 labels. This indicates one unique
399
+ challenge of the label-agnostic setting: unlearnable noises
400
+ generated to prevent one set of labels are not transferable to
401
+ preventing other labeling strategies.
402
+ The working mechanism of existing UEs under the label-
403
+ consistency setting.
404
+ Here, we investigate the representa-
405
+ tions learned by the target model on clean vs. unlearnable
406
+ examples, aiming to gain more understanding of the un-
407
+ learnable mechanism. In Figure 2 (b), we visualize the
408
+ 3-dimensional PCA [39] projections of the original represen-
409
+ tations learned by the ResNet-18 target model for a) clean
410
+ CIFAR-10 training samples, b) unlearnable CIFAR-10 ex-
411
+ amples crafted by EMinN method, and 3) unlearnable (poi-
412
+ soned) CIFAR-10 examples crafted by AdvPoison. The rep-
413
+ resentations are extracted at the last convolutional layer and
414
+ projected using PCA from 512 to 3 dimensions. It shows in
415
+ Figure 2 (b) that the unlearnable examples crafted by EMinN
416
+ and AdvPoison tend to significantly reduce the variance at
417
+ certain dimensions. There are also classes that collapse into
418
+ smaller clusters, like the green class. This indicates that the
419
+ noise disrupts the distributional discrepancy in the repre-
420
+ sentation space to make the data “unlearnable”. The other
421
+ key observation is that the noise greatly shifts the points
422
+ away from the normal data manifold, causing an unneces-
423
+ sary spread over a certain direction. This indicates that the
424
+ noise also breaks the distributional uniformity of the data.
425
+ Overall, it is evident the unlearnable noise crafted by EMinN
426
+ and AdvPoison cripples the learning process by distorting
427
+ both the discrepancy and uniformity of the data distribution
428
+ in the deep representation space.
429
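+ As a hedged illustration of this kind of analysis (not the authors' released code), the sketch
+ below extracts last-block representations from a trained target model and projects them to 3D
+ with PCA, as in Figure 2 (b); the trained `model` and a CIFAR-10 `loader` are assumed to exist.
+ import torch, torch.nn as nn
+ from sklearn.decomposition import PCA
+ 
+ def deep_features_3d(model, loader, device="cpu"):
+     # Drop the classification head so the forward pass returns the pooled
+     # representation after the last convolutional block (512-d for ResNet-18).
+     backbone = nn.Sequential(*list(model.children())[:-1]).to(device).eval()
+     feats = []
+     with torch.no_grad():
+         for x, _ in loader:
+             feats.append(backbone(x.to(device)).flatten(1).cpu())
+     feats = torch.cat(feats).numpy()
+     return PCA(n_components=3).fit_transform(feats)   # (N, 3) points for plotting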
+ Unlearnable examples can overfit to the labels.
430
+ A closer
431
+ look at the visualizations in Figure 2 (b), one may notice
432
+ that the unlearning effects occur only within the classes. I.e.,
433
+ the UEs have overfitted to the class labels. This is somewhat
434
+ not surprising as the unlearnable noises are generated via
435
+ a supervised loss function (i.e., cross-entropy) defined by
436
+ the labels. The noise are thus optimized to thwart the most
437
+ predictive information to the class labels. However, this
438
+ causes the overfitting problem and fails to work if the labels
439
+ are changed. Intuitively, if we could remove the dependency
440
+ on the class labels and turn to exploit the clusters that natu-
441
+ rally arise during the learning process, we could make the
442
+ unlearnable noise more robust to different annotations.
443
+ 3.3. Unlearnable Clusters (UCs)
444
+ Overview.
445
+ Motivated by the above observations, in this
446
+ work we propose to generate UEs by exploiting the clus-
447
+ ters learned by a surrogate model and making the clusters
448
+ unlearnable instead of the labeled classes. We term this
449
+ approach as Unlearnable Clusters (UCs) and illustrate its
450
+ workflow in Figure 3. The key components of UC are
451
+ one generator model G and one surrogate model fs. At a
452
+ high level, UC first employs a surrogate model fs to extract
453
+ the representations E of all samples in the clean dataset Dc.
454
+ It then utilizes the K-means [35] clustering method to derive
455
+ p clusters from the representations E. Subsequently, for
456
+ 4
457
+
458
+ [Figure 2 (b) scatter-plot axis ticks omitted.]
+ [Figure 3 schematic — panel labels: Generator G(σ; θ_i); Uniform Noise σ; Cluster-wise
+ Perturbation δ_i; Unlearnable Image x′; Surrogate Model f_s; Original Image x; clusters
+ C_1, C_2, C_3 in the Feature Space; K-means Initial; Update θ_i; Minimize L_DDU;
+ ×: clustering center µ_Ci.]
510
+ ������������2
511
+
512
+ ������������3
513
+
514
+ Figure 3. The Unlearnable Clusters pipeline. The entire dataset is divided into p clusters via K-means clustering, where each cluster
515
+ corresponds to a certain generator with parameters θi and a cluster-wise perturbation δi.
516
+ each cluster, it generates a cluster-wise perturbation δi using
517
+ the generator G. The noise will be generated and applied to
518
+ craft the UE for each sample in Dc, with samples belonging
+ to the same cluster perturbed by the same cluster-wise
+ noise δi. UEs crafted in this manner prevent the target
+ model from learning meaningful clusters rather than specific class
+ predictions, and are thus more general to different types of label
523
+ exploitations. Next, we will introduce the details of UCs.
524
+ Cluster-wise Perturbations.
525
+ In our UC framework, one
526
+ encoder-decoder [29] generator network is used to gener-
527
+ ate the cluster-wise perturbations, and the generator is
+ reinitialized for each cluster. As such, we need to ex-
529
+ tract the clusters first. Here, we leverage the most classic
530
+ clustering method K-means [35] to detect clusters from the
531
+ deep representations. Particularly, the clean dataset Dc is
532
+ fed into the surrogate model fs to extract the representation
533
+ matrix before the classification layer E = [e1, · · · , ek]. K-
534
+ means clustering is then applied on the representation matrix
535
+ to detect p number of clusters C = {C1, · · · , Cp}, where
536
+ Ci = {x_{i1}, · · · , x_{iτ(i)}} and Σ^p_{i=1} τ(i) = k. The corresponding centers
+ for the clusters are µC = {µ_{C1}, · · · , µ_{Cp}}.
541
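+ A minimal sketch of this clustering step (our illustration, not the official implementation):
+ given the surrogate-model feature matrix E, scikit-learn's K-means returns the cluster index of
+ every image and the p centers µC used in the rest of the pipeline.
+ from sklearn.cluster import KMeans
+ 
+ def cluster_dataset(features, p=10, seed=0):
+     # features: (k, d) array of surrogate-model embeddings of the clean dataset
+     km = KMeans(n_clusters=p, n_init=10, random_state=seed).fit(features)
+     return km.labels_, km.cluster_centers_   # per-image cluster index, and the p centers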
+ With the detected clusters C, we can now propose the
542
+ following method to generate the unlearnable noise for each
543
+ cluster. Intuitively, for cluster Ci, we hope the unlearnable
544
+ noise δi could move all samples in the cluster to a wrong
545
+ cluster center, so as to force the model to forget the cor-
546
+ rect clusters. This is done via the following minimization
547
+ framework:
548
+ θ_i = argmin_{θ_i} L_DDU(C_i, g(µ_{C_i}), θ_i)
+     = argmin_{θ_i} Σ_{x_{ij} ∈ C_i} d(f_s(x_{ij} + G(σ; θ_i)), g(µ_{C_i})),    (1)
557
+ where, LDDU is our proposed Disrupting Discrepancy and
558
+ Uniformity (DDU) loss that defines the distance (d(·)) of
559
+ samples in Ci to a permuted (wrong) cluster center by a per-
560
+ mutation function g(µCi); θi are the parameters of generator
561
+ network G; G(σ; θi)) generates the unlearnable noise for all
562
+ samples in Ci (i.e., xij ∈ Ci). Please note that the above
563
+ problem needs to be solved for p times to obtain the cluster-
564
+ wise unlearnable noise for all p clusters, and for each cluster,
565
+ the generator G is reinitialized with new parameters θi. The
566
+ complete procedure is described in Algorithm 1.
567
+ Algorithm 1 Unlearnable Cluster Generation
+ 1: Input: surrogate model fs, distance metric d, uniform noise σ, number of clusters p,
+    random permutation g, L∞-norm restriction ϵ, clean images x ∈ Dc, initialized generator G
+    with parameters θ
+ 2: Output: cluster-wise perturbations δ = {δ1, · · · , δp}
+ 3: feature matrix E = fs(x)
+ 4: clusters and cluster centers {C, µC} = K-means(E, p)
+ 5: for i in 1 · · · p do
+ 6:     Initialize θi
+ 7:     δi = G(σ; θi)
+ 8:     δi = Clamp(δi, −ϵ, ϵ)
+ 9:     for xij in Ci do
+ 10:        x′ij = Clamp(xij + δi, 0, 1)
+ 11:        θi ← Optimize(x′ij, fs, g(µCi), d)
+ 12:    end for
+ 13:    δi = G(σ; θi)
+ 14:    δi = Clamp(δi, −ϵ, ϵ)
+ 15: end for
597
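+ The following is a hedged PyTorch sketch of the per-cluster loop in Algorithm 1, under stated
+ assumptions: `make_generator` builds a fresh encoder-decoder generator, `clusters` is a list of
+ image batches per cluster, `centers` are the K-means centers, the permutation is g(i) = i + 1
+ (mod p), and MSE stands in for the unspecified distance d.
+ import torch
+ import torch.nn.functional as F
+ 
+ def unlearnable_clusters(fs, make_generator, clusters, centers, sigma, eps=16/255, lr=0.1, steps=1):
+     p, deltas = len(clusters), []
+     for i in range(p):
+         G = make_generator()                               # re-initialised per cluster
+         opt = torch.optim.SGD(G.parameters(), lr=lr)
+         target = centers[(i + 1) % p]                      # permuted ("wrong") cluster center
+         for _ in range(steps):
+             for x in clusters[i]:
+                 delta = G(sigma).clamp(-eps, eps)          # L-infinity restriction
+                 x_adv = (x + delta).clamp(0, 1)
+                 loss = F.mse_loss(fs(x_adv), target.expand_as(fs(x_adv)))
+                 opt.zero_grad(); loss.backward(); opt.step()
+         with torch.no_grad():
+             deltas.append(G(sigma).clamp(-eps, eps))       # final cluster-wise perturbation
+     return deltas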
+ CLIP Surrogate Model.
598
+ How to choose a surrogate
+ model remains an independent challenge for gener-
+ ating effective cluster-wise unlearnable noise. As shown in
605
+ ferability of the generated UEs to different datasets or target
606
+ models [17]. In the traditional label-consistency setting, the
607
+ surrogate model can be a model that directly trained on the
608
+ original (unprotected) dataset, which may of a different (and
609
+ plausibly a better or more complex) model architecture. It
610
+ could also be a model that trained on a larger dataset with
611
+ more classes, e.g., ImageNet-trained models [10,17]. We
612
+ thus adopt an ImageNet-pretrained ResNet-50 as the default
613
+ surrogate model of our UC.
614
+ Analogous to the classification surrogate models used
615
+ for generating the traditional UEs, the ideal surrogate mod-
616
+ els for unlearnable clusters could be those powerful fea-
617
+ ture extractors that could lead to accurate detection of clus-
618
+ ters from an image dataset. We thus propose to also lever-
619
+ age one large-scale vision-and-language pre-trained model
620
+ (VLPM) [22, 23] CLIP [30] as our surrogate model. Pre-
621
+ trained on over 400 million text-to-image pairs, CLIP has
622
+ the power to extract the representation of extremely diverse
623
+ semantics. Moreover, CLIP was pre-trained with a textual de-
624
+ scription rather than a one-hot label to align with the image,
625
+ thus overfitting less to the actual class labels. Concretely,
626
+ we employ the image encoder of CLIP to extract the feature
627
+ matrix for the clean dataset, which is then used to compute
628
+ the clusters and cluster centers. We denote the version of
629
+ UC equipped with the CLIP surrogate model as UC-CLIP.
630
+ 4. Experiments
631
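+ As a hedged sketch of this feature-extraction step (one possible implementation using the
+ openai/CLIP package, not the authors' code), the image encoder produces the embedding matrix
+ that is then clustered with K-means exactly as in Section 3.3; `image_batches` are assumed to be
+ already preprocessed tensors.
+ import torch, clip
+ import torch.nn.functional as F
+ 
+ def clip_features(image_batches, device="cuda", arch="RN50"):   # or "ViT-B/32"
+     model, _ = clip.load(arch, device=device)
+     feats = []
+     with torch.no_grad():
+         for x in image_batches:                                 # (B, 3, 224, 224)
+             f = model.encode_image(x.to(device))
+             feats.append(F.normalize(f, dim=-1).cpu())
+     return torch.cat(feats)                                     # fed to K-means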
+ In this section, we evaluate our UCs methods on different
632
+ datasets against different target models, which is to simulate
633
+ as many unknown cases as possible. We also examine the ro-
634
+ bustness of UCs against several advanced defenses. Finally,
635
+ we demonstrate its effectiveness in attacking commercial
636
+ machine learning platforms Azure and PaddlePaddle.
637
+ 4.1. Experimental Settings
638
+ Datasets and Models.
639
+ We conduct our study on 6 high-
640
+ resolution and industrial-scale vision datasets to simulate
641
+ as diverse real-world applications as possible, including
642
+ Pets [28], Cars [20], Flowers [27], Food [3], SUN397 [40]
643
+ and ImageNet [33]. For ImageNet, we only use its first
644
+ 100 classes which is denoted as ImageNet⋆. For surrogate
645
+ models, we consider ResNet-50 trained on ImageNet-1k as
646
+ the default, unless otherwise explicitly stated. For target
647
+ models, we employ randomly initialized ResNet-18 [15],
648
+ EfficientNet-B1 [38] and RegNetX-1.6GF [31]. We train
649
+ the target models with data augmentations (resizing, ran-
650
+ dom crop, random horizontal flip and normalization) for 90
651
+ epochs, using SGD with initial learning rate 0.1, Cosine
652
+ annealing, momentum 0.9, and batch size 256.
653
+ For generator G, we repeated p times to train the generator
654
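+ A minimal sketch of this target-model training configuration (assumed details such as the loss
+ and loader are ours, not taken from the paper's released code):
+ import torch
+ from torchvision import models
+ 
+ def train_target(train_loader, num_classes, epochs=90, device="cuda"):
+     net = models.resnet18(num_classes=num_classes).to(device)
+     opt = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
+     sched = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=epochs)
+     loss_fn = torch.nn.CrossEntropyLoss()
+     for _ in range(epochs):
+         net.train()
+         for x, y in train_loader:                 # augmented (resize/crop/flip/normalize) batches
+             opt.zero_grad()
+             loss_fn(net(x.to(device)), y.to(device)).backward()
+             opt.step()
+         sched.step()
+     return net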
+ G for 10 epochs using SGD with initial learning rate 0.1,
655
+ 10
656
+ 20
657
+ 30
658
+ Number of class
659
+ 0
660
+ 20
661
+ 40
662
+ 60
663
+ Accuracy (%)
664
+ UC
665
+ UC-CLIP
666
+ random guess
667
+ clean
668
+ (a) Different labelings
669
+ Clean
670
+ SynPer
671
+ EMaxN
672
+ EMinN
673
+ AdvPoison
674
+ DeepConfuse
675
+ UC-CLIP
676
+ UC
677
+ 0
678
+ 10
679
+ 20
680
+ 30
681
+ 40
682
+ 50
683
+ Accuracy (%)
684
+ 48.3 46.8845.1948.1647.02
685
+ 24.37 26.11
686
+ 17.83
687
+ (b) Unsupervised exploitation
688
+ Figure 4. (a) The accuracy of ResNet-18 target models trained on
689
+ the unlearnable Pets dataset but with its labels were re-labeled by
690
+ the hacker into 5 to 35 classes. (b) Comparison of our approach
691
+ with the baselines on Pets dataset against ResNet-18 target model
692
+ trained via self-supervised SimCLR.
693
+ Cosine annealing, momentum 0.9, and batch size 256, and
694
+ 10 epochs for ImageNet⋆ and 50 epochs for other datasets.
695
+ For random permutation g(·), we simply chose i → i + 1 to
696
+ build a closed loop. We consider L∞-norm restriction in this
697
+ work, i.e., ∥δ∥∞ < ϵ = 16/255. The number of clusters p
698
+ is set to 10, with an analysis is provided in Section 4.5.
699
+ Baselines.
700
+ We compare our UC and UC-CLIP with 5 base-
701
+ line methods including DeepConfuse [9], Synthetic Perturba-
702
+ tions (SynPer) [41], Error-minimizing Noise (EMinN) [17],
703
+ Error-maximizing Noise(EMaxN) [19], and Adversarial Poi-
704
+ soning (AdvPoison) [10]. We use their official implementa-
705
+ tions and follow the suggested settings in the original papers
706
+ to generate the UEs or poisons.
707
+ Label-agnostic Setup.
708
+ Please note that we conduct all of
709
+ our experiments under the proposed label-agnostic setting.
710
+ The UCs (and the UEs they serve) are all generated with the
711
+ predicted labels by the surrogate models. The predicted la-
712
+ bels may overlap with the ground truth labels to some extent,
713
+ but are highly inconsistent with the original labels as the sur-
714
+ rogate models are not trained on the particular datasets. The
715
+ hackers train all target models on the unlearnable datasets
716
+ with their ground truth labels. We report the test accuracy of
717
+ the target models on the respective clean test sets.
718
+ 4.2. Main Results
719
+ Effectiveness against different target models.
720
+ We first
721
+ compare our UC and UC-CLIP with the 5 baselines against
722
+ different target models. Table 1 shows the results against
723
+ ResNet-18, EfficientNet-B1, and RegNetX-1.6GF. We have
724
+ the following main findings: (1) Our methods outperform
725
+ the baselines by a huge margin consistently across different
726
+ datasets and target models. This demonstrates the superi-
727
+ ority of our methods over the baselines. (2) Our UC-CLIP
728
+ achieves a better performance than UC, and in most of the
729
+ cases, by a considerable margin. This proves the great poten-
730
+ tial of using CLIP as the surrogate model to protect person
731
+ data from unauthorized exploitations.
732
+ 6
733
+
734
+ Table 1. The test accuracy (%) of different target models trained on the unlearnable datasets generated by our UC/UC-CLIP and the 5
735
+ baseline methods, under the label-agnostic setting. The top-2 best results are highlighted in bold.
736
+ RESNET-18
737
+ EFFICIENTNET-B1
738
+ REGNETX-1.6GF
739
+ METHODS
740
+ PETS CARS FLOWERS FOOD SUN397 IMAGENET⋆ PETS CARS FLOWERS FOOD SUN397 IMAGENET⋆ PETS CARS FLOWERS FOOD SUN397 IMAGENET⋆
741
+ CLEAN
742
+ 62.31 67.18
743
+ 67.18
744
+ 78.97
745
+ 43.08
746
+ 77.76
747
+ 48.68 72.33
748
+ 52.46
749
+ 80.29
750
+ 42.84
751
+ 78.04
752
+ 44.86 63.84
753
+ 52.69
754
+ 84.02
755
+ 43.27
756
+ 80.78
757
+ SYNPER
758
+ 52.60 53.50
759
+ 52.74
760
+ 74.80
761
+ 38.26
762
+ 74.69
763
+ 28.02 58.34
764
+ 42.93
765
+ 74.99
766
+ 35.92
767
+ 72.94
768
+ 34.51 45.54
769
+ 47.16
770
+ 77.65
771
+ 37.78
772
+ 60.38
773
+ EMAXN
774
+ 54.70 52.95
775
+ 51.70
776
+ 73.77
777
+ 37.57
778
+ 73.82
779
+ 33.71 55.64
780
+ 42.66
781
+ 74.40
782
+ 37.30
783
+ 73.72
784
+ 34.26 43.40
785
+ 46.25
786
+ 78.76
787
+ 37.82
788
+ 76.72
789
+ EMINN
790
+ 52.96 54.43
791
+ 50.58
792
+ 75.47
793
+ 38.48
794
+ 74.20
795
+ 36.88 54.23
796
+ 44.06
797
+ 75.54
798
+ 37.20
799
+ 72.20
800
+ 37.04 39.67
801
+ 47.34
802
+ 79.43
803
+ 36.82
804
+ 74.86
805
+ ADVPOISON
806
+ 50.86 51.91
807
+ 50.64
808
+ 75.07
809
+ 38.51
810
+ 73.76
811
+ 37.99 50.08
812
+ 41.65
813
+ 74.88
814
+ 36.44
815
+ 72.54
816
+ 34.29 46.06
817
+ 47.41
818
+ 78.64
819
+ 36.42
820
+ 76.32
821
+ DEEPCONFUSE
822
+ 53.72 51.11
823
+ 50.94
824
+ 73.13
825
+ 34.41
826
+ 55.12
827
+ 35.54 47.15
828
+ 43.28
829
+ 72.91
830
+ 35.22
831
+ 45.74
832
+ 33.71 41.15
833
+ 46.01
834
+ 77.26
835
+ 33.52
836
+ 49.88
837
+ UC (OURS)
838
+ 12.21 33.57
839
+ 35.55
840
+ 55.29 20.38
841
+ 54.80
842
+ 17.06 13.92
843
+ 42.28
844
+ 53.45 22.97
845
+ 32.30
846
+ 4.28 29.46
847
+ 33.79
848
+ 64.48 22.28
849
+ 56.10
850
+ UC-CLIP (OURS) 4.69
851
+ 4.74
852
+ 10.07
853
+ 19.07
854
+ 3.89
855
+ 39.78
856
+ 6.49 15.33
857
+ 14.13
858
+ 17.44 12.95
859
+ 31.82
860
+ 3.87
861
+ 4.18
862
+ 8.12
863
+ 26.76
864
+ 6.04
865
+ 41.66
866
+ Effectiveness Against Different Labelings.
867
+ An even
868
+ more challenging label-agnostic setting is that the hacker
869
+ may exploit the unlearnable dataset using different labeling
870
+ strategies instead of one. So, a natural question is: what
871
+ if the number of labeled classes of the unlearnable dataset
872
+ is less than our cluster number p = 10? Here, we take the
873
+ 37-class Pets dataset as an example and explore the impact
874
+ if the hacker re-labels the unlearnable version of the dataset
875
+ as a 5 to 36 class dataset. One possible labeling strategy is
876
+ that the hacker first extracts the embeddings of the original
877
+ text labels using the BERT model [8], and then clusters the
878
+ embeddings into 5-37 classes using K-means, so as to con-
879
+ struct a mapping from the old labels to the new labels. As
880
+ shown in Figure 4 (a), both our UC and UC-CLIP can bring
881
+ the test accuracy of the target model down to a level that is
882
+ close to the random guess (the black curve). This verifies that
883
+ our methods can craft more generic UEs against the most
884
+ severe label-agnostic exploitations.
885
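+ A hedged sketch of the re-labeling strategy described above (model name and pooling are our
+ illustrative choices): embed the original class names with BERT, cluster the embeddings into
+ n_new groups, and map each old label to its cluster index.
+ import torch
+ from sklearn.cluster import KMeans
+ from transformers import AutoTokenizer, AutoModel
+ 
+ def relabel_by_text(class_names, n_new, model_name="bert-base-uncased", seed=0):
+     tok = AutoTokenizer.from_pretrained(model_name)
+     bert = AutoModel.from_pretrained(model_name).eval()
+     with torch.no_grad():
+         enc = tok(class_names, padding=True, return_tensors="pt")
+         emb = bert(**enc).last_hidden_state.mean(dim=1).numpy()   # one vector per class name
+     new_of_old = KMeans(n_clusters=n_new, n_init=10, random_state=seed).fit_predict(emb)
+     return {old: int(new) for old, new in enumerate(new_of_old)}  # old label -> new label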
+ Robustness to Unsupervised Exploitation.
886
+ We also com-
887
+ pare our methods with the baselines under an unsupervised
888
+ contrastive learning setting against SimCLR [4]. Although
889
+ our UC methods are not specifically designed for this un-
890
+ supervised setting, Figure 4 (b) shows that cluster-wise un-
891
+ learnable noise can also prevent unsupervised exploitation
892
+ against SimCLR.
893
+ 4.3. Preventing Commercial Platforms
894
+ Here, we apply our UC methods to prevent two com-
895
+ mercial machine learning platforms: Microsoft Azure and
896
+ Baidu PaddlePaddle. On both platforms, the training
897
+ details are agnostic to us, including the model architecture,
898
+ learning rate, batch size, epoch, data augmentation, splitting
899
+ of the validation set, etc. Considering that ViT may be used
900
+ on commercial platforms due to its recent popularity, we
901
+ upgrade our UC-CLIP method by replacing the ResNet-50
902
+ (RN50) surrogate model by a ViT-B-32 (ViTB32) surrogate
903
+ model. The results are reported in Table 2, which are consis-
904
+ Table 2. The test accuracy (%) of models trained by Azure and
905
+ PaddlePaddle platforms on unlearnable Cars dataset crafted by
906
+ different methods. The training configuration on the platform was
907
+ set to “fastest training”.
908
+ METHODS             Azure    PaddlePaddle
+ CLEAN               48.45    83.74
+ SYNPER              42.38    47.59
+ EMAXN               42.83    42.99
+ EMINN               44.06    44.40
+ ADVPOISON           43.97    43.38
+ DEEPCONFUSE         39.47    41.88
+ UC (RN50)           36.40    30.96
+ UC-CLIP (RN50)      26.97    25.79
+ UC-CLIP (VITB32)    22.47    11.49
938
+ tent with that in Table 1. I.e., both of our methods can protect
939
+ the data uploaded to the two platforms against their training
940
+ algorithms. Unsurprisingly, the ViTB32-powered UC-CLIP
941
+ method achieves the best protection performance by causing
942
+ the lowest test accuracy. This suggests the effectiveness of
943
+ our methods even against commercial platforms.
944
+ 4.4. Resistance to Potential Defenses
945
+ In this section, we test the robustness of our UC
946
+ methods to several augmentation based defenses, includ-
947
+ ing Mixup [43], Gaussian smoothing, Cutmix [42] and
948
+ Cutout [7]. We did not consider adversarial training here because
+ the images involved in this paper are generally large in
950
+ size, number and resolution, making adversarial training ex-
951
+ tremely hard to converge without losing considerable clean
952
+ accuracy on most of the datasets [26]. Compared with ad-
953
+ versarial training, we believe that a more practical defense
954
+ should be very efficient, such as image denoising or the con-
955
+ sidered augmentation techniques. As can be observed in
956
+ Table 3, the 4 data augmentation defenses have minimum
957
+ impact on our UC and UC-CLIP methods. Particularly, Gaus-
958
+ sian smoothing appears to be the most effective defense, but
959
+ the accuracy is still below 25%.
960
+ 7
961
+
962
+ Table 3. The test accuracy (%) of ResNet-18 trained using different
963
+ defenses against our methods on Pets dataset.
964
+ METHODS     NO DEFENSE   MIXUP   GAUSSIAN   CUTMIX   CUTOUT
+ UC          12.21        14.34   24.26      14.50    12.35
+ UC-CLIP      4.69        11.96   18.59       6.21    12.29
982
+ 4.5. Ablation Study
983
+ [Figure 5 plots: accuracy (%) vs. number of clusters p ∈ [5, 40] for UC (panel (a)) and
+ UC-CLIP (panel (b)); tick values omitted.]
1009
+ (b) Effect of p on UC-CLIP
1010
+ Figure 5. Analyzing the effect of cluster number p on Pets dataset.
1011
+ Here, we analyze the sensitivity of our methods to the
1012
+ number of clusters p, which has been set to p = 10 as a
1013
+ default. We take the 37-class Pets dataset as an example
1014
+ and evaluate our UC and UC-CLIP method under different
1015
+ values of p ∈ [5, 40]. As shown in Figure 5, our methods are
1016
+ quite stable to varying hyperparameter p for p ≥ 10. This
1017
+ indicates that, as long as the clusters can cover most of the
1018
+ concepts in a dataset, the generated unlearnable noise can
1019
+ effectively prevent the model from learning the real content
1020
+ from the dataset. As the number of clusters increases, the
1021
+ noise tends to become more effective, although there is a
1022
+ slight variation at 35. Note that, even in the worst case at
1023
+ p = 5, our methods still outperform the baselines.
1024
+ 4.6. Mixture of Clean and Unlearnable Data
1025
+ [Figure 6 plots — panel (a) "Mixture vs. Clean-only": accuracy (%) vs. number of classes for
+ clean-only and mixture training; panel (b): accuracy (%) vs. epoch for training (unlearnable),
+ test (clean) and test (unlearnable); tick values omitted.]
1055
+ (b) Accuracy trends
1056
+ Figure 6.
1057
+ (a) The test accuracy (%) of ResNet-18 trained on
1058
+ unlearnable-clean mixed vs. clean-only data; and (b) the accu-
1059
+ racy trends on clean vs. unlearnable examples. The unlearnable
1060
+ examples are crafted using our UC method on Pets dataset.
1061
+ All the above experiments are conducted under the as-
1062
+ sumption that all samples in the dataset are protected, a
1063
+ commonly adopted assumption in the literature [10,17,41].
1064
+ This setting is reasonable when the protectors have access
1065
+ to the entire dataset, e.g., an online social media company
1066
+ adopts the technique to protect the contents created by all
1067
+ of its users. A more general case is that only a certain pro-
1068
+ portion of the users protect their data while others do not.
1069
+ This results in a mixed dataset with both clean and unlearnable
1070
+ samples. Here we test our UC method under this setting
1071
+ and show the change in test accuracy with the number of
1072
+ clean classes in Figure 6 (a). I.e., for the mixture dataset,
1073
+ the rest of the classes are made unlearnable by UC. It can be
1074
+ inferred that the unlearnable classes almost do not contribute
1075
+ to the model training, a similar conclusion as in previous
1076
+ works [10,17,41]. This implies that only those who adopt
1077
+ the technique will get protected.
1078
+ 4.7. More Understanding
1079
+ Why our UCs are more powerful than standard UEs
1080
+ against label-agnostic exploitation? As we explained in
1081
+ Section 3.1, the idea of UCs is inspired by the effectiveness
1082
+ of disrupting the uniformity and discrepancy in preventing
1083
+ the model from learning useful information. However, this
1084
+ also raises another question: what exactly does the target
1085
+ model learn? To answer these two questions, here we ana-
1086
+ lyze the learning curves of the target model on the clean vs.
1087
+ unlearnable examples separately. As shown in Figure 6 (b),
1088
+ as the training progresses, the training accuracy on the un-
1089
+ learnable training samples steadily improves until it reaches
1090
+ 100%. But there is almost no improvement in the clean test
1091
+ accuracy on the clean test samples. This is consistent with
1092
+ the above experimental results that the target model has
1093
+ not learned the capability to perceive normal samples. Sur-
1094
+ prisingly, however, the model’s accuracy on the perturbed
1095
+ test samples is fairly high (> 60%), considering that the
1096
+ normally trained ResNet-18 only achieves a test accuracy of
1097
+ 62.31% on clean Pets dataset. This implies that the unlearn-
1098
+ able noise distribution contained in the UCs has effectively
1099
+ concealed the real data distribution.
1100
+ 5. Conclusion
1101
+ Unlearnable examples (UEs) have shown great potential
1102
+ in preventing hackers from using users’ private data to train
1103
+ commercial or malicious models. A number of methods
1104
+ have been proposed to improve UEs’ transferability and ro-
1105
+ bustness to different datasets, target models and training
1106
+ paradigms. In this work, we identified one limitation of ex-
1107
+ isting UE methods, i.e., their label-consistency assumption.
1108
+ To overcome this limitation, we proposed a more general
1109
+ setting where the hackers could exploit the protected data
1110
+ with different sets of labels. We termed this more challeng-
1111
+ ing setting as label-agnostic, and proposed an Unlearnable
1112
+ Clusters (UCs) technique with conditioned generator models,
1113
+ K-means clustering, and large-scale vision-and-language pre-
1114
+ training model CLIP, to craft effective UEs against a wide
1115
+ range of datasets and target models. We also demonstrate
1116
+ its effectiveness against commercial platforms Microsoft
1117
+ Azure and Baidu PaddlePaddle.
1118
+ 8
1119
+
1120
+ References
1121
+ [1] Battista Biggio, Blaine Nelson, and Pavel Laskov. Poison-
1122
+ ing attacks against support vector machines. arXiv preprint
1123
+ arXiv:1206.6389, 2012. 2
1124
+ [2] Battista Biggio and Fabio Roli. Wild patterns: Ten years after
1125
+ the rise of adversarial machine learning. Pattern Recognition,
1126
+ 84:317–331, 2018. 1, 2
1127
+ [3] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool.
1128
+ Food-101–mining discriminative components with random
1129
+ forests. In European conference on computer vision, pages
1130
+ 446–461. Springer, 2014. 6
1131
+ [4] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Ge-
1132
+ offrey Hinton. A simple framework for contrastive learning
1133
+ of visual representations. In International conference on
1134
+ machine learning, pages 1597–1607. PMLR, 2020. 7
1135
+ [5] Xinyun Chen, Chang Liu, Bo Li, Kimberly Lu, and Dawn
1136
+ Song. Targeted backdoor attacks on deep learning systems
1137
+ using data poisoning. arXiv preprint arXiv:1712.05526, 2017.
1138
+ 2
1139
+ [6] Valeriia Cherepanova, Micah Goldblum, Harrison Foley,
1140
+ Shiyuan Duan, John Dickerson, Gavin Taylor, and Tom Gold-
1141
+ stein. Lowkey: Leveraging adversarial attacks to protect
1142
+ social media users from facial recognition. arXiv preprint
1143
+ arXiv:2101.07922, 2021. 3
1144
+ [7] Ekin D Cubuk, Barret Zoph, Dandelion Mane, Vijay Vasude-
1145
+ van, and Quoc V Le. Autoaugment: Learning augmentation
1146
+ policies from data. arXiv preprint arXiv:1805.09501, 2018. 7
1147
+ [8] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina
1148
+ Toutanova.
1149
+ Bert:
1150
+ Pre-training of deep bidirectional
1151
+ transformers for language understanding.
1152
+ arXiv preprint
1153
+ arXiv:1810.04805, 2018. 7
1154
+ [9] Ji Feng, Qi-Zhi Cai, and Zhi-Hua Zhou. Learning to confuse:
1155
+ generating training time adversarial data with auto-encoder.
1156
+ Advances in Neural Information Processing Systems, 32, 2019.
1157
+ 2, 3, 4, 6
1158
+ [10] Liam Fowl, Micah Goldblum, Ping-yeh Chiang, Jonas Geip-
1159
+ ing, Wojciech Czaja, and Tom Goldstein. Adversarial exam-
1160
+ ples make strong poisons. Advances in Neural Information
1161
+ Processing Systems, 34:30339–30351, 2021. 2, 3, 4, 6, 8
1162
+ [11] Shaopeng Fu, Fengxiang He, Yang Liu, Li Shen, and Dacheng
1163
+ Tao. Robust unlearnable examples: Protecting data against
1164
+ adversarial learning. In International Conference on Learning
1165
+ Representations, 2022. 1, 3, 4
1166
+ [12] Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy.
1167
+ Explaining and harnessing adversarial examples.
1168
+ arXiv
1169
+ preprint arXiv:1412.6572, 2014. 2
1170
+ [13] Tianyu Gu, Brendan Dolan-Gavitt, and Siddharth Garg. Bad-
1171
+ nets: Identifying vulnerabilities in the machine learning
1172
+ model supply chain. arXiv preprint arXiv:1708.06733, 2017.
1173
+ 2
1174
+ [14] Hao He, Kaiwen Zha, and Dina Katabi. Indiscriminate poi-
1175
+ soning attacks on unsupervised contrastive learning. arXiv
1176
+ preprint arXiv:2202.11202, 2022. 1, 3
1177
+ [15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
1178
+ Deep residual learning for image recognition. In Proceed-
1179
+ ings of the IEEE conference on computer vision and pattern
1180
+ recognition, pages 770–778, 2016. 4, 6
1181
+ [16] Kashmir Hill. The secretive company that might end privacy
1182
+ as we know it. In Ethics of Data and Analytics, pages 170–
1183
+ 177. Auerbach Publications, 2020. 1
1184
+ [17] Hanxun Huang, Xingjun Ma, Sarah Monazam Erfani, James
1185
+ Bailey, and Yisen Wang. Unlearnable examples: Making
1186
+ personal data unexploitable. In International Conference on
1187
+ Learning Representations, 2021. 1, 2, 3, 4, 6, 8
1188
+ [18] W Ronny Huang, Jonas Geiping, Liam Fowl, Gavin Taylor,
1189
+ and Tom Goldstein. Metapoison: Practical general-purpose
1190
+ clean-label data poisoning. Advances in Neural Information
1191
+ Processing Systems, 33:12080–12091, 2020. 3
1192
+ [19] Pang Wei Koh and Percy Liang. Understanding black-box
1193
+ predictions via influence functions. In International confer-
1194
+ ence on machine learning, pages 1885–1894. PMLR, 2017.
1195
+ 3, 6
1196
+ [20] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d
1197
+ object representations for fine-grained categorization. In Pro-
1198
+ ceedings of the IEEE international conference on computer
1199
+ vision workshops, pages 554–561, 2013. 6
1200
+ [21] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple
1201
+ layers of features from tiny images. 2009. 4
1202
+ [22] Junnan Li, Ramprasaath Selvaraju, Akhilesh Gotmare, Shafiq
1203
+ Joty, Caiming Xiong, and Steven Chu Hong Hoi.
1204
+ Align
1205
+ before fuse: Vision and language representation learning
1206
+ with momentum distillation. Advances in neural information
1207
+ processing systems, 34:9694–9705, 2021. 2, 6
1208
+ [23] Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh,
1209
+ and Kai-Wei Chang.
1210
+ Visualbert: A simple and perfor-
1211
+ mant baseline for vision and language.
1212
+ arXiv preprint
1213
+ arXiv:1908.03557, 2019. 2, 6
1214
+ [24] Yunfei Liu, Xingjun Ma, James Bailey, and Feng Lu. Reflec-
1215
+ tion backdoor: A natural backdoor attack on deep neural net-
1216
+ works. In European Conference on Computer Vision, pages
1217
+ 182–199. Springer, 2020. 2
1218
+ [25] Zhuoran Liu, Zhengyu Zhao, Alex Kolmus, Tijn Berns, Twan
1219
+ van Laarhoven, Tom Heskes, and Martha Larson. Going
1220
+ grayscale: The road to understanding and improving unlearn-
1221
+ able examples. arXiv preprint arXiv:2111.13244, 2021. 3
1222
+ [26] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt,
1223
+ Dimitris Tsipras, and Adrian Vladu. Towards deep learn-
1224
+ ing models resistant to adversarial attacks. arXiv preprint
1225
+ arXiv:1706.06083, 2017. 3, 7
1226
+ [27] Maria-Elena Nilsback and Andrew Zisserman. Automated
1227
+ flower classification over a large number of classes. In 2008
1228
+ Sixth Indian Conference on Computer Vision, Graphics &
1229
+ Image Processing, pages 722–729. IEEE, 2008. 6
1230
+ [28] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and
1231
+ CV Jawahar. Cats and dogs. In 2012 IEEE conference on
1232
+ computer vision and pattern recognition, pages 3498–3505.
1233
+ IEEE, 2012. 6
1234
+ [29] Omid Poursaeed, Isay Katsman, Bicheng Gao, and Serge
1235
+ Belongie. Generative adversarial perturbations. In Proceed-
1236
+ ings of the IEEE Conference on Computer Vision and Pattern
1237
+ Recognition, pages 4422–4431, 2018. 5
1238
+ [30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya
1239
+ Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry,
1240
+ Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning
1241
+ 9
1242
+
1243
+ transferable visual models from natural language supervision.
1244
+ In International Conference on Machine Learning, pages
1245
+ 8748–8763. PMLR, 2021. 2, 6
1246
+ [31] Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaim-
1247
+ ing He, and Piotr Doll´ar. Designing network design spaces. In
1248
+ Proceedings of the IEEE/CVF conference on computer vision
1249
+ and pattern recognition, pages 10428–10436, 2020. 6
1250
+ [32] Jie Ren, Han Xu, Yuxuan Wan, Xingjun Ma, Lichao Sun,
1251
+ and Jiliang Tang. Transferable unlearnable examples. arXiv
1252
+ preprint arXiv:2210.10114, 2022. 3
1253
+ [33] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, San-
1254
+ jeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,
1255
+ Aditya Khosla, Michael Bernstein, et al. Imagenet large
1256
+ scale visual recognition challenge. International journal of
1257
+ computer vision, 115(3):211–252, 2015. 6
1258
+ [34] Avi Schwarzschild, Micah Goldblum, Arjun Gupta, John P
1259
+ Dickerson, and Tom Goldstein. Just how toxic is data poison-
1260
+ ing? a unified benchmark for backdoor and data poisoning
1261
+ attacks. In International Conference on Machine Learning,
1262
+ pages 9389–9398. PMLR, 2021. 3
1263
+ [35] Shokri Z Selim and Mohamed A Ismail. K-means-type al-
1264
+ gorithms: A generalized convergence theorem and charac-
1265
+ terization of local optimality. IEEE Transactions on pattern
1266
+ analysis and machine intelligence, (1):81–87, 1984. 4, 5
1267
+ [36] Ali Shafahi, W Ronny Huang, Mahyar Najibi, Octavian Su-
1268
+ ciu, Christoph Studer, Tudor Dumitras, and Tom Goldstein.
1269
+ Poison frogs! targeted clean-label poisoning attacks on neural
1270
+ networks. Advances in neural information processing systems,
1271
+ 31, 2018. 3
1272
+ [37] Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan
1273
+ Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus.
1274
+ Intriguing properties of neural networks.
1275
+ arXiv preprint
1276
+ arXiv:1312.6199, 2013. 2
1277
+ [38] Mingxing Tan and Quoc Le. Efficientnet: Rethinking model
1278
+ scaling for convolutional neural networks. In International
1279
+ conference on machine learning, pages 6105–6114. PMLR,
1280
+ 2019. 6
1281
+ [39] Svante Wold, Kim Esbensen, and Paul Geladi. Principal
1282
+ component analysis. Chemometrics and intelligent laboratory
1283
+ systems, 2(1-3):37–52, 1987. 4
1284
+ [40] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva,
1285
+ and Antonio Torralba. Sun database: Large-scale scene recog-
1286
+ nition from abbey to zoo. In 2010 IEEE computer society
1287
+ conference on computer vision and pattern recognition, pages
1288
+ 3485–3492. IEEE, 2010. 6
1289
+ [41] Da Yu, Huishuai Zhang, Wei Chen, Jian Yin, and Tie-Yan
1290
+ Liu. Availability attacks create shortcuts. In Proceedings of
1291
+ the 28th ACM SIGKDD Conference on Knowledge Discovery
1292
+ and Data Mining, pages 2367–2376, 2022. 1, 2, 3, 4, 6, 8
1293
+ [42] Sangdoo Yun, Dongyoon Han, Seong Joon Oh, Sanghyuk
1294
+ Chun, Junsuk Choe, and Youngjoon Yoo. Cutmix: Regu-
1295
+ larization strategy to train strong classifiers with localizable
1296
+ features. In Proceedings of the IEEE/CVF international con-
1297
+ ference on computer vision, pages 6023–6032, 2019. 7
1298
+ [43] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David
1299
+ Lopez-Paz.
1300
+ mixup: Beyond empirical risk minimization.
1301
+ arXiv preprint arXiv:1710.09412, 2017. 7
1302
+ [44] Jiaming Zhang, Jitao Sang, Xian Zhao, Xiaowen Huang, Yan-
1303
+ feng Sun, and Yongli Hu. Adversarial privacy-preserving
1304
+ filter. In Proceedings of the 28th ACM International Confer-
1305
+ ence on Multimedia, pages 1423–1431, 2020. 1, 3
1306
+ [45] Chen Zhu, W Ronny Huang, Hengduo Li, Gavin Taylor,
1307
+ Christoph Studer, and Tom Goldstein. Transferable clean-
1308
+ label poisoning attacks on deep neural nets. In International
1309
+ Conference on Machine Learning, pages 7614–7623. PMLR,
1310
+ 2019. 3
1311
+ 10
1312
+
1dAzT4oBgHgl3EQfRfuL/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
1tA0T4oBgHgl3EQfMv-f/content/2301.02137v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc7c7870ce42836a7ad5199ffefa13139e475150b50a85b1f0bed620295e42f6
3
+ size 357589
1tA0T4oBgHgl3EQfMv-f/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6be7d6799e359638aa99a85111f5b489a11ef3b0f24e05b624c45506721e9797
3
+ size 4063277
1tA0T4oBgHgl3EQfMv-f/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a632ceb0dff8eb6b5c9cb35b1b06623711cb2480cc1b1662d07008d79af2ae7
3
+ size 143046
29E1T4oBgHgl3EQf5wUG/content/tmp_files/2301.03514v1.pdf.txt ADDED
@@ -0,0 +1,1100 @@
1
+ Strong Collapse of Random Simplicial Complexes
2
+ Jean-Daniel Boissonnat ∗ 1, Kunal Dutta † 2, Soumik Dutta † 3, and Siddharth Pritam ∗ 4
3
+ 1Universit´e Cˆote d’Azur, INRIA, Sophia Antipolis, France. email:
4
5
+ 2Faculty of Mathematics, Informatics and Mechanics, University of Warsaw, Poland.
6
7
+ 3Faculty of Mathematics, Informatics, and Mechanics, University of Warsaw, Poland.
8
9
+ 4Universit´e Cˆote d’Azur, INRIA, Sophia Antipolis, France. email:
10
11
+ Abstract
12
+ The strong collapse of a simplicial complex, proposed by Barmak and Minian [6], is a
13
+ combinatorial collapse of a complex onto its sub-complex. Recently, it has received attention
14
+ from computational topology researchers [22, 7, 8], owing to its empirically observed useful-
15
+ ness in simplification and size-reduction of simplicial complexes while preserving
+ the homotopy class. We consider the strong collapse process on random simplicial complexes.
+ For the Erdős-Rényi random clique complex X(n, c/n) on n vertices with edge probability
+ c/n with c > 1, we show that after any maximal sequence of strong collapses the remaining
+ subcomplex, or core, must have (1 − γ)(1 − cγ)n + o(n) vertices asymptotically almost surely
20
+ (a.a.s.), where γ is the least non-negative fixed point of the function f(x) = exp (−c(1 − x))
21
+ in the range (0, 1). These are the first theoretical results proved for strong collapses on
22
+ random (or non-random) simplicial complexes.
23
+ 1
24
+ Introduction
25
+ Motivation
26
+ Simple collapse is a combinatorial notion which simplifies a simplicial complex
27
+ without changing its topology. It can be expressed as a series of elementary moves of removals of
28
+ pairs of simplices σ and τ, such that σ is uniquely contained in τ. The notion of simple collapse
29
+ was introduced by J.H.C Whitehead [21] to study homotopy types of cell complexes. Since then
30
+ it has found usage in many different areas of topology, especially in computational topology.
31
+ Recently new variants of simple collapses have been introduced, called strong collapses and more
32
+ generally d-collapses [6, 8, 5]. In such collapses one removes special vertices (more generally
33
+ d-simplices) called dominated vertices (simplices) whose link is a simplicial cone. It’s again
34
+ expressed as a series of elementary moves of removals of dominated vertices (simplices). They
35
+ have been shown to be very powerful tools to solve many problems in computational topology.
36
+ In particular, the recent works of Pritam et al. [9, 8, 23] have shown that strong collapses and
37
+ edge collapses (d-collapse for d = 1) can be used for efficient computation of one parameter and
38
+ ∗J.-D. Boissonnat and S. Pritam received funding from the European Research Council (ERC) under the
39
+ European Union’s Seventh Framework Programme (FP/2007- 2013) / ERC Grant Agreement No. 339025 GUDHI
40
+ (Algorithmic Foundations of Geometry Understanding in Higher Dimensions).
41
+ †K. Dutta and S. Dutta received funding from the Polish NCN SONATA Grant no. 2019/35/D/ST6/04525.
42
+ 1
43
+ arXiv:2301.03514v1 [cs.CG] 9 Jan 2023
44
+
45
+ multi-parameter persistence. Efficient computation of persistent homology is one of the central
46
+ topics of research in topological data analysis.
47
+ The computation of persistent homology involves computing homology groups of a nested
48
+ sequence of simplicial complexes called filtrations. Computing persistent homology requires
+ O(n^3) time and O(n^2) space, where n is the total number of simplices in the filtration. The
50
+ general technique developed in [9, 7, 8, 12, 23] is to reduce a filtration to a smaller filtration
51
+ using strong or edge collapse such that the persistent homology is preserved. In [9, 7, 8, 12, 23],
52
+ it has been established through experiments that in practice the reduced filtrations are very
53
+ small and thereafter computation of persistent homology is extremely fast. The gain in efficiency
54
+ is quite dramatic in the case of flag (clique) complexes, where the strong collapse and edge
55
+ collapse can be computed using only the graph (1-skeleton) of the given complex [7, 12, 23].
56
+ As mentioned above, the efficiency reported in [9, 7, 12, 23] is demonstrated through experiments and
57
+ there is no theoretical guarantee over the reduction size. This is due to the fact that in general
58
+ the amount of reduction depends on the individual complex and its combinatorial structure. In
59
+ fact, the reduction is dependent even on the order of the collapses and a different order can
60
+ result in a different core, except in the case of strong collapse. The problem is even harder when we
+ want to study the reduction size in filtered simplicial complexes. This motivates us to consider
62
+ the case of random simplicial complexes and study the average reduction size by collapses.
63
+ In this article, we study the problem of reduction size achieved by the strong collapses of a
64
+ clique complex defined over an Erd˝os-R´enyi random graph.
65
+ Previous Work
66
+ The study of random simplicial complexes was initiated in the seminal paper of
67
+ Linial and Meshulam [16]. Later Meshulam and Wallach [19] generalized the model of random
68
+ complexes to obtain the Linial-Meshulam (LM) model of d-dimensional random complexes.
69
+ Since then a large body of work from several authors has emerged on many different models
70
+ of random simplicial complexes, studying various topological and geometric properties of such
71
+ complexes [14]. The study of simple collapses for random simplicial complexes has also been of
72
+ interest to researchers and there have been numerous works in this direction. In the d-dimensional
73
+ LM model, Kozlov [15] proved bounds on the threshold for vanishing of the d-th homology.
74
+ Simple collapses on random complexes were first studied by Aronshtam, Linial, �Luczak and
75
+ Meshulam [4], who improved Kozlov’s bound to get a tight bound on the threshold and also gave
76
+ a bound on the threshold for collapsibility in the d-dimensional LM model. Later Aronshtam
77
+ and Linial [2, 3] extended this line of work, obtaining first the threshold for the vanishing of
78
+ the d-th homology in [2] and then the threshold for non-collapsibility of the d-dimensional LM
79
+ complex [3]. In [17] Linial and Peled obtained precise asymptotic bounds on the size of the core
80
+ of such complexes. Very recently, Malen [18] has shown that the ER clique complex X(n, p) is
81
+ (k + 1)-collapsible with high probability for p = n−α when α > 1/(k + 1).
82
+ Thus, to the best of our knowledge, work on collapses in random complexes has so far
83
+ considered only simple collapses.
84
+ Throughout this paper, we shall use the notation asymptotically almost surely (a.a.s.) for a
85
+ series of events (En)n≥1, when the probability of occurence of En goes to 1 as n → ∞.
86
+ Models of Random Simplicial Complexes
87
+ In this paper we shall consider two models of
88
+ random simplicial complexes, which are described below. For a graph G, let cl(G) denote the
+ clique (flag) complex on G, i.e. the simplicial complex where each complete subgraph of G
+ on d vertices is a (d − 1)-simplex in cl(G). The Erdős-Rényi (ER) model X(n, p) on n vertices
91
+ with probability parameter p ∈ [0, 1] is given by connecting each possible pair of elements of an
92
+ n-element set by an edge randomly and independently with probability p to get the random
93
+ graph G = G(n, p). Let X(n, p) := cl(G(n, p)).
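+ A small illustrative sketch of this construction (our example, using networkx): the 1-skeleton
+ of X(n, c/n) is simply G(n, c/n), and its maximal simplices are the maximal cliques of the graph.
+ import networkx as nx
+ 
+ def er_clique_complex(n, c, seed=0):
+     G = nx.gnp_random_graph(n, c / n, seed=seed)     # 1-skeleton of X(n, c/n)
+     maximal_simplices = list(nx.find_cliques(G))     # maximal cliques = maximal faces of cl(G)
+     return G, maximal_simplices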
94
+ 2
95
+
96
+ Our Contribution
97
+ We give bounds on the size of the core (i.e. the smallest sub-complex
98
+ without any dominated vertex) of a random simplicial complex after strong (vertex) collapses.
99
+ Whereas previous works focused on computing the threshold for the appearance and disappearance
100
+ of the k-th homology class for different k, here we are more interested in computing the size of
101
+ the core. We show that for n-vertex ER clique complexes, the size of the core after a maximal
102
+ series of strong collapses is a.a.s. a constant fraction of n, with the constant depending only on
103
+ the edge probability, and bounded away from 1. Further, we also find a precise expression for
104
+ the constant, as a fixed point of an implicit equation. Our first theorem is stated below.
105
+ Let K be a simplicial complex. For i ≥ 1, we use fi(K) to denote the set of i−simplices of
106
+ the complex. By a slight abuse of notation, we let f0(K) denote the set of non-isolated vertices
107
+ of the complex and ˜f0(K) denote the set of non-isolated vertices of the complex. (A vertex
108
+ v ∈ K is isolated if v is not a face of any edge in K.) In a pruning phase run on K, all
109
+ dominated (strong-collapsible) vertices of K are simultaneously collapsed. Let Rt(K) denote
110
+ a complex obtained by running t pruning phases over K. Lastly, R∞(K) denotes a complex
111
+ obtained from K after running a maximal series of strong collapses on K. i.e. the core of K.
112
+ Theorem 1. Let c > 1 and X ∼ X(n, c/n). Then there exists a constant γ ≡ γ(c) given by
113
+ the least non-negative fixed point of the function f(x) = exp (−c(1 − x)), x ∈ (0, 1), such that
114
+ 0 < γ < 1/c < 1 and a.a.s. the following holds
115
+ |f0(R∞(X))| = (1 − γ)(1 − cγ)n + o(n).
116
137
+ Figure 1: We ran experiments on ER complexes and plotted the size of the core (after strong collapse)
+ for n ∈ [0, 10^5] and c = 1.5. These experiments clearly validate our theoretical results.
139
+ 3
140
+
141
165
+ Figure 2: In a different set of experiments over ER complexes, we varied the constant c ∈ [1, 5],
+ keeping the number of vertices fixed at n = 10^4.
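+ The theoretical curves in Figures 1 and 2 can be reproduced directly from Theorem 1. The following is a
+ minimal illustrative sketch (ours, not part of the original paper; function names are our own): it computes
+ γ(c) by iterating the recursion γ_{t+1} = e^{−c(1−γ_t)} from γ_0 = 0 and then evaluates the predicted core
+ size (1 − γ)(1 − cγ)n.
+
+     import math
+
+     def gamma(c, tol=1e-12):
+         # Least fixed point of f(x) = exp(-c(1-x)): iterate gamma_{t+1} = f(gamma_t)
+         # starting from gamma_0 = 0; the iterates increase to gamma(c).
+         g = 0.0
+         while True:
+             g_next = math.exp(-c * (1.0 - g))
+             if g_next - g < tol:
+                 return g_next
+             g = g_next
+
+     if __name__ == "__main__":
+         c, n = 1.5, 100_000
+         g = gamma(c)
+         print("gamma(c)      =", g)
+         print("core fraction =", (1 - g) * (1 - c * g))
+         print("predicted core size for n =", n, ":", (1 - g) * (1 - c * g) * n)
+
+ For c = 1.5 this gives the dashed theoretical line of Figure 1 as a function of n.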
167
+ We then address a question of algorithmic interest: given an ε ∈ (0, 1), how many rounds
168
+ do we need in the first epoch to get within an εn gap from the actual size of the core? The
169
+ following theorem gives a bound on the number of rounds t as a function of ε.
170
+ Theorem 2. Let X ∼ X(n, c/n) and ϵ > 0 (sufficiently small) be given. Then there exists
+ t ∈ Z+ such that
+ ((c − cγ)e^c / (1 − ce^{−c})) (ce^{−c})^t ≤ ϵ ≤ ((c + 1)e^c / (1 − cγ)) (cγ)^t,
175
+ and |f0(Rt(X))| − |f0(R∞(X))| ≤ ϵn + o(n) a.a.s.
176
+ 1.1
177
+ Overview of Proofs and Outline of Sections
178
+ While our analysis shares the general flow of the analysis of the simple collapsibility of LM
179
+ complexes in e.g. [4, 2, 3], there are several differences and difficulties. Firstly, in both cases
180
+ (strong collapse of ER clique complex and simple collapse of LM model) our goal is to find the
181
+ size of the core, rather than whether the complex is collapsible or not. Secondly, in the case of
182
+ the random ER clique complex our analysis needs to take into account the non-homogeneity of
183
+ the complex. That is, maximal simplices in this model can have different sizes. Further, unlike
184
+ in the LM model, the existence of a maximal simplex is not independent of the existence of all
185
+ other possible maximal simplices. Finally perhaps the most interesting difference of the random
186
+ ER clique complex model in our context, is that the effect of removing a vertex is not necessarily
187
+ localized – a fact which requires a fair bit of innovation to handle (in several places), especially
188
+ in proving the concentration bounds in Section 6, and in the later stages of the analysis, in
189
+ Section 7. We present a more detailed overview of the proof strategy used in our concentration
190
+ bound, in the beginning of Section 6.
191
+ With the above caveats in mind, we first briefly review the main ideas of the proof of
192
+ Aronshtam and Linial [3]. The analysis was split into two epochs, each of which were further
193
+ divided into several rounds (phases). In the first epoch, in each round, every simple-collapsible
194
+ simplex was simultaneously collapsed, and this procedure was repeated for a constant number
195
+ of rounds. Aronshtam and Linial [3] used a tree-like model of a random simplicial complex to
196
+ approximate the local structure of the random LM complex and showed that the total number
197
+ of collapsed simplices over all such rounds tended to a constant fraction of the number of initial
198
+ 4
199
+
200
+ simplices, as the number of rounds increased. Moreover, this limit constant could be expressed
201
+ as a fixed point of an implicit equation involving only the distribution parameter c.
202
+ In the analysis of the second epoch, a simplex would be chosen randomly from the set of
203
+ (non-neighbouring) simple-collapsible simplices, and collapsed in each round. The aim was to
204
+ show that in this epoch, the number of simplices collapsed would be asymptotically negligible
205
+ compared to the inital number of simplices present. Thus in summary, the final number of
206
+ deleted simplices is determined by the first epoch itself, and the second epoch serves to show the
207
+ tightness of this bound.
208
+ Our proofs also split the analysis into two epochs. Similar to [3], we show that a certain tree-
209
+ like model of random simplicial complexes provides a good approximation of local neighbourhoods,
210
+ which is done in Section 3. This is followed by the analysis of the first epoch, in Section 4. The
211
+ main theorem of this section gives an expression for the expected number of vertices remaining
212
+ after t rounds (or pruning phases) of the first epoch. Bounds on t as a function of ε, γ and
213
+ c, are given in Theorem 2, which is proved in Section 5. Before beginning the analysis of the
214
+ second epoch however, we need bounds on the concentration of the size of the core itself, as well
215
+ as several other random variables. These are proved in Section 6, where we use the notions of
216
+ critical and precritical (sub)complexes – described in more detail in the beginning of Section 6.
217
+ With these concentration bounds in place, we move to the analysis of the second epoch in
218
+ Section 7.
219
+ 2
220
+ Preliminaries
221
+ In this section we briefly introduce some topological and probabilistic notions. Readers can refer
222
+ to [13] for a comprehensive introduction to topics related to topology and [10] for topics related
223
+ to probability theory and random structures.
224
+ Simplicial complex.
225
+ An abstract simplicial complex K is a collection of subsets of a
226
+ non-empty finite set X, such that for every subset A in K, all the subsets of A are in K. An
227
+ element of K is called a simplex. An element of cardinality d + 1 is called a d-simplex and d
228
+ is called its dimension. Given a simplicial complex K, we denote its geometric realization as
229
+ |K|. A simplex is called maximal if it is not a proper subset of any other simplex in K. A
230
+ sub-collection L of K is called a subcomplex if it is a simplicial complex itself. A subcomplex
231
+ K′ of K is called a d-skeleton of K if it contains all the simplices of K of dimension at most d.
232
+ Erdős–Rényi graph.
+ This is the probability space G(n, p) consisting of
+ all the graphs on n vertices. The probability of occurrence of a graph with m edges is p^m (1 −
+ p)^{n(n−1)/2 − m}. In other words, it is a random graph on n vertices where each edge occurs
236
+ independently with probability p.
237
+ Clique complex and Neighborhood.
238
+ A complex K is a clique or a flag complex if,
239
+ when a subset of its vertices form a clique (i.e. any pair of vertices is joined by an edge), they
240
+ span a simplex. For a vertex v in G, the open neighborhood NG(v) of v in G is defined as
241
+ NG(v) := {u ∈ G | [uv] ∈ E}, here E is the set of edges of G. The closed neighborhood
242
+ NG[v] is NG[v] := NG(v) ∪ {v}. Similarly we define the closed and open neighborhood of an
243
+ edge [xy] ∈ G, NG[xy] and NG(xy) as NG[xy] := N[x] ∩ N[y] and NG(xy) := N(x) ∩ N(y),
244
+ respectively. The above definitions can be extended to any k-clique σ = [v1, v2, ..., vk] of G;
245
+ NG[σ] := ∩_{vi∈σ} N[vi] and NG(σ) := ∩_{vi∈σ} N(vi).
248
+ Star, Link and Simplicial Cone.
249
+ Let σ be a simplex of a simplicial complex K, the
250
+ closed star of σ in K, stK(σ) is a subcomplex of K which is defined as follows, stK(σ) := {τ ∈
251
253
+ K| τ ∪ σ ∈ K}. The link of σ in K, lkK(σ) is defined as the set of simplices in stK(σ) which
254
+ do not intersect with σ, lkK(σ) := {τ ∈ stK(σ) | τ ∩ σ = ∅}. The open star of σ in K, st^o_K(σ), is
+ defined as the set stK(σ) \ lkK(σ). Usually st^o_K(σ) is not a subcomplex of K.
258
+ Let L be a simplicial complex and let a be a vertex not in L. Then the set aL defined as
259
+ aL := {a, τ | τ ∈ L or τ = σ ∪ a; where σ ∈ L} is called a simplicial cone.
260
+ Simple collapse.
261
+ Given a complex K, a simplex σ ∈ K is called a free simplex if σ has
262
+ a unique coface τ ∈ K. The pair {σ, τ} is called a free pair. The action of removing a free
263
+ pair: K → K \ {σ, τ} is called an elementary simple collapse. A series of such elementary
264
+ simple collapses is called a simple collapse. We denote it as K ↘ L. A subcomplex Kec of K
265
+ is called an elementary core of K if K↘Kec and Kec has no free pair.
266
+ Removal of a simplex.
267
+ We denote by K \ σ the subcomplex of K obtained by removing
268
+ σ, i.e. the complex that has all the simplices of K except the simplex σ and the cofaces of σ.
269
+ Dominated simplex.
270
+ A simplex σ in K is called a dominated simplex if the link lkK(σ)
271
+ of σ in K is a simplicial cone, i.e. if there exists a vertex v′ /∈ σ and a subcomplex L of K, such
272
+ that lkK(σ) = v′L. We say that the vertex v′ is dominating σ and that σ is dominated by v′,
273
+ which we denote as σ ≺ v′.
274
+ σ-algebra
275
+ The reader can refer to [10] for the definition of σ-algebra.
276
+ d-collapse.
277
+ Given a complex K, the action of removing a dominated d-simplex σ from K is
+ called an elementary d-collapse, denoted as K ↘↘d {K \ σ}. A series of elementary d-collapses
+ is called a d-collapse, denoted as K ↘↘d L. We further call a complex K d-collapse minimal
+ if it does not have any dominated d-simplices. A subcomplex Kd of K is called a d-core if K
+ ↘↘d Kd and Kd is d-collapse minimal. A 0-core of a complex K is unique; however, this is not
+ true in general for d ≥ 1. Like simple collapses, d-collapses preserve the homotopy type of a
283
+ simplicial complex.
284
+ A 0-collapse is a strong collapse as introduced in [6] and 1-collapse is called an edge
285
+ collapse [8]. The following lemma from [8] characterizes the domination of a simplex in the
286
+ special case of a flag complex in terms of neighborhood.
287
+ Lemma 3. Let σ be a simplex of a flag complex K. Then σ will be dominated by a vertex v′ if
288
+ and only if NG[σ] ⊆ NG[v′].
289
+ In this article, our main focus will be the case d = 0, i.e. when σ is a vertex. The next
290
+ lemma from [7], though elementary, is of crucial significance.
291
+ Lemma 4. Let K be a flag complex and let L be any subcomplex of K obtained by strong
292
+ collapses. Then L is also a flag complex.
293
+ Both lemmas (Lemma 3 and Lemma 4) show that strong collapse is well-suited to flag
294
+ complexes. In the next sections we will investigate the reduction capabilities of strong collapse on the clique
+ complex of an Erdős–Rényi random graph.
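+ Lemma 3 reduces vertex domination in a flag complex to a containment test on closed neighborhoods of the
+ underlying graph. The following is a small illustrative sketch (ours; names and parameter choices are our
+ own, and this is not the authors' implementation) of a pruning phase and of the resulting strong-collapse
+ core computation on an Erdős–Rényi graph, using exactly that neighborhood test.
+
+     import random
+
+     def erdos_renyi(n, p, seed=0):
+         rng = random.Random(seed)
+         adj = {v: set() for v in range(n)}
+         for u in range(n):
+             for v in range(u + 1, n):
+                 if rng.random() < p:
+                     adj[u].add(v)
+                     adj[v].add(u)
+         return adj
+
+     def dominated(adj, v):
+         # Lemma 3 for flag complexes: v is dominated iff N[v] is contained in N[w]
+         # for some vertex w; such a w is necessarily a neighbour of v.
+         closed_v = adj[v] | {v}
+         return any(closed_v <= (adj[w] | {w}) for w in adj[v])
+
+     def pruning_phase(adj):
+         # One phase: remove simultaneously all currently dominated vertices.
+         to_remove = [v for v in adj if adj[v] and dominated(adj, v)]
+         for v in to_remove:
+             for w in adj[v]:
+                 adj[w].discard(v)
+             del adj[v]
+         return len(to_remove)
+
+     def core_size(adj):
+         # Iterate pruning phases until no dominated vertex remains, then count
+         # the non-isolated vertices of the resulting complex (its 0-core).
+         while pruning_phase(adj) > 0:
+             pass
+         return sum(1 for v in adj if adj[v])
+
+     if __name__ == "__main__":
+         n, c = 2000, 1.5
+         print("core size:", core_size(erdos_renyi(n, c / n)))
+
+ Running this for several values of n and c reproduces, up to random fluctuation, the experimental points of
+ Figures 1 and 2.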
296
+ 3
297
+ Tree process
298
+ In this section, we describe the tree process which is used to simulate the collapse process in the
299
+ first epoch of the (strong) collapse. A one-dimensional tree is built recursively as follows:
300
+ 1. Start with a single node(root).
301
+ 6
302
+
303
+ 2. In the nth iteration, to each leaf at distance n − 1 from the root, add a number of children drawn
+ independently from a Poisson distribution with parameter c (> 1).
305
+ Let Tn denote the set of all possible trees after the nth iteration for n ≥ 1, with T0 being the root
+ itself. Let T := ∪_{n∈N} Tn.
309
+ Let γt be the probability that a tree T ∈ Tt is pruned to the root in no more than t − 1 steps.
310
+ Clearly, γ1 = e−c. Set γ0 = 0. Also, we have the following recursive relation which is true in
311
+ general:
312
+ γt+1 = e−c(1−γt).
313
+ Note that, in this process we never prune the root itself even if its degree is 1. We call such
314
+ a process root collapsing. Let γ^k_t denote the probability that a tree T ∈ Tt has degree k after
+ t − 1 root collapsing steps. Then,
+ γ^k_t = ((c(1 − γt−1))^k / k!) e^{−c(1−γt−1)}.
+ Observe that γ^k_1 gives the initial degree distribution. Also, let γ^{≥2}_t denote the probability that a
+ vertex has degree at least 2 after t − 1 root collapsing steps. Then we have
+ γ^{≥2}_t = Σ_{k≥2} γ^k_t = 1 − γt(1 + c(1 − γt−1)).
+ Define βt := 1 − γt+1. Thus, βt is the probability that at least t + 1 root collapsing steps are
335
+ needed to isolate the root of a tree T ∈ Tt.
336
+ Define f(x) := e^{−c(1−x)} on the interval [0, 1]. We shall assume c > 1 for the rest of this
+ paper unless specified otherwise. Note that f([0, 1]) ⊆ [0, 1], f′(x) = cf(x) and f(x) is strictly
+ increasing on the interval [0, 1]. Let ft(x) denote the function obtained by composing f t times.
+ Then ft is also strictly increasing on [0, 1] for all t ≥ 1. As γ1 > γ0, applying ft−1 on both sides,
340
+ we get γt > γt−1 for all t ≥ 1.
341
+ Also define γ to be the leftmost zero of the function g(x) := e^{−c(1−x)} − x defined on the
342
+ range [0, ∞). Note that 0 < γ ≤ 1 as g(1) = 0.
343
+ Lemma 5. For c > 1 and γ = γ(c) defined as earlier we have cγ < 1.
344
+ Proof of Lemma 5. Let g(x) be defined as above. So g(γ−) > 0 and g(γ) = 0. Thus, from
345
+ the differentiability of g(x), cγ − 1 = g′(γ) ≤ 0. Now, if cγ − 1 = 0 then ce^{1−c} = 1, which is
346
+ impossible for c > 1. Thus cγ − 1 < 0.
347
+ Now observe that, x ≤ γ =⇒ f(x) ≤ f(γ) = γ. Thus, by the fact that f′(x) = cf(x), f(x)
348
+ restricted to [0, γ] becomes a contraction mapping. So, by the Banach Fixed Point theorem, f|[0,γ]
+ has a unique fixed point which, in our case, is γ.
350
+ To summarize the above arguments we get the following remark.
351
+ Remark 1. γt converges to 0 < γ < 1/c as an increasing sequence and βt converges to
352
+ 1 − 1/c < β < 1 as a decreasing sequence.
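+ Remark 1 is easy to verify numerically. The short sketch below (our own illustration, with hypothetical
+ function names) prints the first iterates of the recursion γ_{t+1} = e^{−c(1−γ_t)}, their limit, and checks that
+ cγ < 1 as asserted in Lemma 5.
+
+     import math
+
+     def gamma_sequence(c, t_max):
+         # gamma_0 = 0, gamma_{t+1} = exp(-c(1 - gamma_t)); by Remark 1 the sequence
+         # increases to a limit gamma with 0 < gamma < 1/c.
+         gs = [0.0]
+         for _ in range(t_max):
+             gs.append(math.exp(-c * (1.0 - gs[-1])))
+         return gs
+
+     if __name__ == "__main__":
+         c = 2.0
+         gs = gamma_sequence(c, 20)
+         print(["%.6f" % g for g in gs[:8]])
+         print("limit ~", gs[-1], "  c*gamma =", c * gs[-1], "(< 1, cf. Lemma 5)")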
353
+ 4
354
+ First Epoch
355
+ In this section, we present the analysis of the first epoch of the collapse. The first epoch is executed
356
+ in phases and in each phase we remove a maximal set of dominated vertices simultaneously.
357
+ Our goal, in this section, is to prove the following theorem.
358
+ 7
359
+
360
+ Theorem 6. Let X ∼ X(n, c/n). Let E(|f0(Rt(X))|) denote the expected number of non-isolated
361
+ vertices in X after t strong collapse phases and γt be as defined in the last section. Then,
362
+ E(|f0(Rt(X))|) = (1 − γt+1 − cγt + cγt^2)n.
364
+ We start by proving some important lemmas about the local structure of the complex. For
365
+ X ∼ X(n, c
366
+ n), let us define the following event,
367
+ D := {deg(v) ≤ log n ∀v ∈ ˜f0(X)}.
368
+ Then, the following lemma can be proved using standard Chernoff bounds.
369
+ Lemma 7. Pr{D} = 1 − on(1).
370
+ Proof of Lemma 7. Note that for any v ∈ ˜f0(X), deg(v) ∼ Bin(n − 1, c/n). Let Dv be the
+ event that deg(v) ≤ log(n). Then by the Chernoff bound for the binomial distribution,
+ Pr{¬Dv} = Pr{deg(v) > log(n)} ≤ (ec(n − 1)/(n log(n)))^{log(n)} ≤ (ec/log(n))^{log(n)} = n^{−log(log(n/ec))}.
+ Thus by the union bound, Pr{∪_{v∈f0(X)} ¬Dv} ≤ n^{1−log(log(n/ec))}. Hence,
+ Pr{D} = Pr{∩_{v∈f0(X)} Dv} = 1 − Pr{∪_{v∈f0(X)} ¬Dv} ≥ 1 − n^{1−log(log(n/ec))} = 1 − on(1).
388
+ By Cl(S) we denote the simplicial closure of the set S. Fix v ∈ ˜f0(X). Define N0 := Cl(v)
389
+ and N−1 := ∅. Also define Ni+1 := Cl({s ∈ X|∃u ∈ ˜f0(Ni)\ ˜f0(Ni−1)|u ⊂ s})∪Ni. Equivalently,
390
+ this can also be defined in terms of the 1-skeleton of the complex.
391
+ Define the event At = {Nt ∈ T }.
392
+ Lemma 8. Let X ∼ X(n, p) and fix v ∈ ˜f0(X). Then Pr{At ∩ D} = 1 − o(1).
+ Proof. If X ∈ D then | ˜f0(Nt)| = O(log^{t+1} n) and we want to avoid O(log^{2t+2} n) edges to make Nt
+ a one-dimensional tree. The probability of that happening is
+ (1 − c/n)^{O(log^{2t+2} n)} = 1 − o(1).
+ Now the degree of a node of this tree comes from Bin(n − 1, c/n). For large n this
+ distribution can be approximated by Po(c).
398
+ Proof of Theorem 6. Recall that for a simplicial complex X, Rt(X) denotes a complex obtained
399
+ after t phases and f0(X) denotes the set of non-isolated vertices (0−simplices) of the complex.
400
+ Note that if a vertex v ∈ f0(X) survived t pruning steps then it must have had degree deg(v) ≥ 2
401
+ after t − 1 pruning steps. Thus, Pr{v ∈ f0(Rt(X))} ≤ γ^{≥2}_t = 1 − γt(1 + c(1 − γt−1)). This bound
+ counts both the isolated vertices and the degree-one (i.e., collapsible) vertices, and thus gives a
+ slight overestimate. To get a more precise estimate we observe, in the spirit of [3], that a
+ vertex v ∈ f0(X) survives t pruning steps if it is neither collapsed nor isolated after t pruning
+ steps. The probability of such an event is 1 − γt+1 − cγt + cγt^2. The previous lemma asserts that it is
409
+ indeed the survival probability of a vertex of the simplicial complex.
410
+ 8
411
+
412
+ 5
413
+ Rate of Convergence
414
+ In this section, we prove Theorem 2 thus giving bounds on the rate of convergence of the variable
415
+ γt.
416
+ Lemma 9. Let c, γt be as defined earlier. Then
417
+ cγt ≤ (γt+1 − γt)/(γt − γt−1) ≤ cγt+1.
420
+ Proof. Let f(x) and g(x) be as defined in section 3. Clearly,
421
+ (γt+1 − γt)/(γt − γt−1) = g(γt)/g(γt−1)
+ = g(γt−1 + (γt − γt−1))/g(γt−1)
+ = (g(γt−1) + g′(s)(γt − γt−1))/g(γt−1)    (for some s ∈ [γt−1, γt])
+ = f′(s) = cf(s)    (as γt − γt−1 = g(γt−1))
+ As f(x) is an increasing function the result follows.
+ In particular, ce^{−c} ≤ (γt+1 − γt)/(γt − γt−1) ≤ cγ for all t ≥ 1.
436
+ Let δ(t) = (1 − γt+1 − cγt + cγt^2) − (1 − γt+2 − cγt+1 + cγt+1^2), as defined in section 7. It can
+ be shown that
+ δ(t) = (1 − γt+1 − cγt + cγt^2) − (1 − γt+2 − cγt+1 + cγt+1^2)
+ = (γt+2 − γt+1) + c(γt+1 − γt) + c(γt + γt+1)(γt − γt+1)
+ = ((γt+2 − γt+1)/(γt+1 − γt) + c − c(γt+1 + γt))(γt+1 − γt).
+ Hence,
+ (c − cγ)e^c (ce^{−c})^t ≤ δ(t) ≤ (c + 1)e^c (cγ)^t.
+ Now define ϵ ≡ ϵ(t) := (1 − γt+1 − cγt + cγt^2) − (1 − γ − cγ + cγ^2) so that E[|f0(Rt(X))|] −
+ (1 − γ)(1 − cγ)n = ϵn. Consequently,
+ ϵ(t) = (1 − γt+1 − cγt + cγt^2) − (1 − γ − cγ + cγ^2)
+ = (γ − γt+1) + c(γ − γt) + c(γt + γ)(γt − γ)
+ = (γ − γt+1) + (c − c(γt + γ))(γ − γt).
+ So,
+ ((c − cγ)e^c / (1 − ce^{−c})) (ce^{−c})^t ≤ ϵ(t) ≤ ((c + 1)e^c / (1 − cγ)) (cγ)^t.
460
+ Thus we get the following corollary.
461
+ Corollary 10. For any t ≥ 1,
+ e^c (ce^{−c})^t ≤ γt+1 − γt ≤ e^c (cγ)^t
+ and
+ (e^c / (1 − ce^{−c})) (ce^{−c})^t ≤ γ − γt ≤ (e^c / (1 − cγ)) (cγ)^t.
+ From the above corollary, we get Theorem 2.
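+ In practice, Theorem 2 answers the algorithmic question of how many pruning rounds are needed for a desired
+ accuracy ϵ. The sketch below (our own illustration, not from the paper) inverts the upper bound
+ ((c + 1)e^c / (1 − cγ))(cγ)^t ≤ ϵ to obtain a sufficient number of rounds t.
+
+     import math
+
+     def rounds_needed(c, eps, iters=10_000):
+         # Smallest t making the upper bound of Theorem 2 at most eps.
+         g = 0.0
+         for _ in range(iters):
+             g = math.exp(-c * (1.0 - g))          # gamma(c) by fixed-point iteration
+         A = (c + 1) * math.exp(c) / (1.0 - c * g)
+         return math.ceil(math.log(eps / A) / math.log(c * g))
+
+     if __name__ == "__main__":
+         for eps in (1e-1, 1e-2, 1e-3):
+             print(eps, rounds_needed(1.5, eps))
+
+ Since cγ < 1 (Lemma 5), the required t grows only logarithmically in 1/ϵ.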
469
+ 9
470
+
471
+ 6
472
+ Concentration of Size of the Complex after the First Epoch
473
+ In this section, we shall prove a concentration bound on the size of the core. Unlike in the case
474
+ of simple collapses in d-dimensional LM complexes [2, 3], concentration bounds in our case are
475
+ less straightforward. Observe firstly, that deleting a single vertex v could potentially change
476
+ the domination status of an arbitrary number of vertices, as for example when v dominates the
477
+ entire complex. Thus the influence of a vertex can be n in the worst case. Therefore we shall
478
+ need to use an edge exposure martingale inequality, in the form of a variant of an inequality of
479
+ Freedman [11], given by Warnke [20], which allows us to consider the path variance of the effect
480
+ of a single edge, rather than the worst case effect.
481
+ In order to bound the path variance, we shall show that if the influence of a variable is large,
482
+ there is a specific class of subcomplexes, which we call Critical Complexes, one of which must
483
+ occur in the 1-skeleton of the complex. It is not hard to show (and we do) that the probability
484
+ of occurrence of these subgraphs is vanishingly low in the original random complex. However, the
485
+ variance needs to be controlled at all steps in the edge exposure martingale, i.e. when we are
486
+ computing expectations over arbitrarily small subcomplexes of the original complex. To handle
487
+ this, we need to define a superset of critical complexes, which we call Precritical Complexes,
488
+ and show that their probability of occurrence will still be vanishingly small throughout the edge
489
+ exposure process. We can then define a stopped martingale which stops if at any step of the edge
490
+ exposure process, a precritical complex occurs, and prove concentration bounds using Warnke’s
491
+ inequality for this martingale. The final concentration bound is then the bound obtained for
492
+ the stopped martingale, together with the probability that the martingale ever encounters a
493
+ precritical complex.
494
+ Fix p = c/n and m = n(n − 1)/2. For 1 ≤ i ≤ m, let ei ∼ Bernoulli(p) be i.i.d. random variables
+ corresponding to the existence of edges. Clearly X(n, p) = e1 × · · · × em as probability spaces.
+ Now we can define a filtration of σ-algebras {Fi}_{i=0}^m on X(n, p) by setting Fi to the σ-algebra
+ corresponding to e1, · · · , ei.
502
+ Let X ∼ X(n, p). Now we construct an edge exposure martingale (see e.g. [1] for a definition
503
+ of the edge exposure martingale) as follows: set Yi := E(|f0(Rt(X))| | Fi). Then Ym = |f0(Rt(X))| and Y0 = E(|f0(Rt(X))|).
504
+ This section is devoted to prove the following concentration result, which says that the size
505
+ of the complex after t pruning rounds of the first epoch is close to its expected value with high
506
+ probability.
507
+ Theorem 11. (Main Theorem) Let X ∼ X(n, p). Let |f0(Rt(X))| be the number of vertices after t
+ strong collapsing phases and Y0 = E(|f0(Rt(X))|) be its expected value. Then for any s ≥ 0 we
+ have,
+ Pr{||f0(Rt(X))| − Y0| ≥ s · n^{2/3}} ≤ 2 exp(−s^2 · n^{1/3} / ((c4^{t+1} + (2/3)s n^{−1/3} 2^{t+1}) + O(1/n))) + O(1/n) = on(1).
525
+ To prove this we begin by observing some combinatorial results. In the following lemmas, we
526
+ show that the influence of deleting one vertex is bounded, with high probability.
527
+ Lemma 12. Pr{deleting a vertex b gives birth to k newly generated dominated vertices} ≤
528
+ O(1/n^{3k−4}).
531
+ Proof of Lemma 12. We first claim that if deleting a vertex b gives birth to k newly generated
+ dominated vertices, then b has at least k neighbors, one of which is the dominating vertex of
533
+ b. In the following diagram, the solid arrow denotes domination and the white arrow denotes
534
536
+ future domination in the next phase only after deleting vertex b. The pointy head of the arrow is
537
+ towards the dominated vertex. Vertices a, b, c may be connected to other vertices. The following
538
+ diagrams exhibit some of the potential arrangements.
539
+ [Diagrams: three potential arrangements on vertices a, b, c (and a, b, c1, . . . , ci, . . . , cj).]
551
+ A careful inspection will show that these kinds of arrangements are impossible. Indeed, if
+ one occurred, it would imply that the would-be-dominated vertices are already dominated. This
553
+ is because we are only deleting b which is a common neighbor of all the would-be-dominated-
554
+ dominating pairs. Thus neighbors of b can not have white arrows between themselves.
555
+ Thus the figure below gives the necessary minimal arrangements for the birth of k newly generated
+ dominated vertices. In the following diagram, all the ci’s and their corresponding d’s are assumed
+ to be connected to some non-neighbor of a which lies in the set {e1, · · · , el′}. We claim that in
+ such a case f1 − f0 ≥ (k − 2) + (k − 1) + (k − 1) ≥ 3k − 4. We shall prove our claim by induction
559
+ on k ≥ 2. The case k = 2 is evident from the following diagram.
560
+ [Diagram: the base case k = 2, on vertices a, b, c1, d, e.]
565
+ We now prove the induction step. Consider the following figure again.
566
+ [Diagram: the general arrangement on vertices a, b, c1, . . . , ck−1, with vertex sets {d1, · · · , dl} and {e1, · · · , el′}.]
573
+ Now assume that the claim holds for k − 1. Adding the kth vertex ck−1 increases
+ f1 − f0 by 1. Also the corresponding d and e increase f1 − f0 by 1 each. This ends the
+ induction step. So in all the possible minimal arrangements f1 − f0 ≥ 3k − 4. Thus the expected
+ number of such arrangements is (n choose f0) · (c/n)^{f1} = O(1/n^{3k−4}). Thus the result follows from Markov’s
+ inequality.
584
+ Corollary 13. Pr{deleting a vertex b gives birth to 3 newly generated dominated vertices}
585
+ ≤ O(1/n^5).
587
+ Corollary 14. Let X ∼ X(n, p) and e ∈ f1(X), then
588
+ Pr{|f0(Rt(X)) \ f0(Rt(X \ {e}))| ≥ 2^{t+1}} ≤ O(1/n^5).
590
+ 11
591
+
592
+ Proof. If such an event happens then there must be a dominated vertex in the process whose
593
+ deletion creates at least 3 newly dominated vertices. Thus the result follows from the previous
594
+ corollary.
595
+ Let Critical Complexes denote the minimal simplicial complexes corresponding to k = 3 (see
596
+ the following diagrams). Let a Precritical Complex be any complex obtained from a Critical Complex by removing any
+ four of its edges. Let N denote the set of complexes from X(n, p) that contain a Critical
+ Complex and N′ denote the set of complexes that contain a Precritical Complex. Clearly, N′ ⊇ N.
599
+ [Diagrams: the four Critical Complexes, on vertex sets {a, b, c1, c2, d, e}, {a, b, c1, c2, d1, d2, e}, {a, b, c1, c2, d, e1, e2} and {a, b, c1, c2, d1, d2, e1, e2}.]
627
+ The following result is immediate.
628
+ Lemma 15. Let X ∼ X(n, p). Then,
629
+ Pr{X ∈ N} ≤ O(1/n^5),
+ and
+ Pr{X ∈ N′} ≤ O(1/n).
634
+ Now define a stopping time τ on {Fi}_{i=0}^m by τ := min{s : (e1, · · · , es) contains a Precritical Complex}. Define a
+ stopped martingale with respect to {Fi}_{i=0}^m by Mi := Yi∧τ.
638
+ We first prove the following theorem.
639
+ Theorem 16. (Stopped Martingale Inequality) Let {Mi}_{i=0}^m be the stopped martingale defined
+ as above. Then for any s ≥ 0 we have,
+ Pr{|Mm − M0| ≥ s · n^{2/3}} ≤ 2 exp(−s^2 · n^{1/3} / ((c4^{t+1} + (2/3)s n^{−1/3} 2^{t+1}) + O(1/n))).
649
+ In order to prove the above theorem, we shall use the following lemma from Warnke [20].
650
+ Assume that {FK}0≤k≤N is an increasing sequence of σ-algebras, and {MK}0≤k≤N is an
651
+ {FK}0≤k≤N-adapted bounded martingale.
652
+ Lemma 17. (2-sided version of the Bounded Variance Martingale Inequality) Let Uk be an Fk−1-measurable
+ variable satisfying |Mk − Mk−1| ≤ Uk. Set Ck = max_{i∈[k]} Ui and Vk = Σ_{i∈[k]} Var(Mi − Mi−1 | Fi−1).
+ Let φ(x) = (1 + x) log(1 + x) − x. For every s ≥ 0 and V, C > 0 we have
+ Pr{|Mk − M0| ≥ s, Vk ≤ V, Ck ≤ C for some k ∈ [N]} ≤ 2e^{−s^2/(2V + 2Cs/3)}.
657
+ Theorem 11 essentially follows from Theorem 16 and Lemma 15.
658
+ 12
659
+
660
+ Proof of Theorem 16.
+ Let X ∈ X(n, p). We shall first try to calculate Pr{X ∈ N|ei, · · · , e1} where (e1, · · · , ei)
+ does not form any precritical complex. Let M ⊂ N be the set of complexes that contain
+ some critical complex not involving any of the edges from {ei, · · · , e1} and M′ ⊂ N be the set
+ of complexes where all the critical complexes involve some edges from {ei, · · · , e1}. Clearly,
665
+ N = M ⊔ M′. Thus,
666
+ Pr{X ∈ N|ei, · · · , e1} = Pr{X ∈ M ⊔ M′|ei, · · · , e1}
667
+ = Pr{X ∈ M|ei, · · · , e1} + Pr{X ∈ M′|ei, · · · , e1}
668
+ = Pr{X ∈ M} + Pr{X ∈ M′|ei, · · · , e1}
669
+ Pr{X ∈ M} is the probability that {ei+1, · · · , em} contains a critical complex. By reasoning
+ similar to the proof of Lemma 12, we get Pr{X ∈ M} = O(1/n^5). On the other hand, note
+ that as (ei, · · · , e1) does not contain any precritical complex, at least 5 more edges are needed
+ for X to form a critical complex involving some edges from {ei, · · · , e1}. Suppose, depending on
+ (e1, · · · , ei), k more edges are needed to complete a critical complex. Clearly 5 ≤ k ≤ 13. Also
+ note that, in a critical complex, there are at most 2 vertices of degree two and the rest have degree
+ at least 3. Thus even in the worst case one needs to choose 3 vertices and construct 5 particular
+ edges. Thus Pr{X ∈ M′|ei, · · · , e1} = O(1/n^2). Hence, Pr{X ∈ N|ei, · · · , e1} = O(1/n^2) given
677
+ (e1, · · · , ei) does not form any precritical complex. In particular,
678
+ Pr{|f0(Rt(X)) \ f0(Rt(X \ {ei+1}))| ≥ 2^{t+1} | ei, · · · , e1} ≤ O(1/n^2)
680
+ under the same assumption.
681
+ Thus
682
+ E(|f0(Rt(X)) \ f0(Rt(X \ {ei+1}))| | ei, · · · , e1) ≤ 2^{t+1} + n · O(1/n^2) ≤ 2^{t+1} + O(1/n)
+ whenever (ei, · · · , e1) does not contain any precritical complex.
684
+ We now claim that |Mi+1 − Mi| ≤ 2^{t+1} + O(1/n). If (ei, · · · , e1) contains a precritical
685
+ complex then the martingale stops and the claim holds. Now suppose (ei, · · · , e1) does not
686
+ contain any precritical complex. Then
687
+ |[Mi+1 − Mi](1, ei, · · · , e1)| = |Mi+1(1, ei, · · · , e1) − Eei+1[Mi+1]|
688
+ = |Mi+1(1, ei, · · · , e1) − p · (Mi+1(1, ei, · · · , e1)) − (1 − p) · (Mi+1(0, ei, · · · , e1))|
689
+ = |(1 − p) · (Mi+1(1, ei, · · · , e1) − Mi+1(0, ei, · · · , e1))|
690
+ ≤ (1 − p)(2^{t+1} + O(1/n))
691
+ Similarly,
692
+ |[Mi+1 − Mi](0, ei, · · · , e1)| = |Mi+1(0, ei, · · · , e1) − Eei+1[Mi+1]|
693
+ = |Mi+1(0, ei, · · · , e1) − p · (Mi+1(1, ei, · · · , e1)) − (1 − p) · (Mi+1(0, ei, · · · , e1))|
694
+ = |p · (Mi+1(0, ei, · · · , e1) − Mi+1(1, ei, · · · , e1))|
695
+ ≤ p(2^{t+1} + O(1/n))
696
+ Hence the claim follows.
697
+ Next we claim that Var(Mi+1 − Mi|Fi) = Var(Mi+1|Fi) ≤ (c/n)(4^{t+1} + O(1/n)) ≤ O(1/n).
698
+ Indeed if (ei, · · · , e1) contains a precritical complex then the martingale stops and the variance
699
+ is zero. Otherwise
700
+ 13
701
+
702
+ Var(Mi+1 − Mi|ei, · · · , e1) = p · ([Mi+1 − Mi](1, ei, · · · , e1))^2 + (1 − p) · ([Mi+1 − Mi](0, ei, · · · , e1))^2
+ ≤ p(1 − p)^2 (2^{t+1} + O(1/n))^2 + (1 − p)p^2 (2^{t+1} + O(1/n))^2
+ ≤ p(1 − p)(4^{t+1} + O(1/n))
+ ≤ (c/n)(4^{t+1} + O(1/n))
706
+ Thus by Lemma 17,
707
+ Pr{|Mm − M0| ≥ s · n^{2/3}} ≤ 2 exp(−s^2 · n^{4/3} / ((n^2 − n) · (c/n)(4^{t+1} + O(1/n)) + (2/3) · (2^{t+1} + O(1/n)) · s · n^{2/3}))
+ ≤ 2 exp(−s^2 · n^{4/3} / ((n − 1)c4^{t+1} + (2/3)s n^{2/3} 2^{t+1} + O(1)))
+ ≤ 2 exp(−s^2 · n^{4/3} / (nc4^{t+1} + (2/3)s n^{2/3} 2^{t+1} + O(1)))
+ ≤ 2 exp(−s^2 · n^{4/3} / (n(c4^{t+1} + (2/3)s n^{−1/3} 2^{t+1}) + O(1)))
+ ≤ 2 exp(−s^2 · n^{1/3} / ((c4^{t+1} + (2/3)s n^{−1/3} 2^{t+1}) + O(1/n)))
734
+ Proof of Theorem 11. Theorem 11 follows from Theorem 16 and Lemma 15 via the
735
+ following inequalities.
736
+ Pr{|Ym − Y0| ≥ s} = Pr{|Ym − Y0| ≥ s and ¬N′} + Pr{|Ym − Y0| ≥ s and N′}
737
+ ≤ Pr{|Mm − M0| ≥ s} + Pr{N′}
738
+ Thus
739
+ Pr{|Ym − Y0| ≥ s · n^{2/3}} ≤ 2 exp(−s^2 · n^{1/3} / ((c4^{t+1} + (2/3)s n^{−1/3} 2^{t+1}) + O(1/n))) + O(1/n) = on(1).
750
+ Now set X′0 := |f0(Rt(X))| − |f0(Rt+1(X))|.
752
+ Lemma 18. For any s > 0, Pr{|X′0 − E[X′0]| > s n^{2/3}} < on(1).
757
+ Proof of Lemma 18. Observe that
758
+ |X′0 − E[X′0]| > t =⇒ |(|f0(Rt−1(X))| − |f0(Rt(X))|) − (E[|f0(Rt−1(X))|] − E[|f0(Rt(X))|])| > t
+ =⇒ ||f0(Rt(X))| − E[|f0(Rt(X))|]| + ||f0(Rt−1(X))| − E[|f0(Rt−1(X))|]| > t
+ =⇒ ||f0(Rt(X))| − E[|f0(Rt(X))|]| > t/2 or ||f0(Rt−1(X))| − E[|f0(Rt−1(X))|]| > t/2
+ Hence, by the union bound,
+ Pr{|X′0 − E[X′0]| > s n^{2/3}}
+ ≤ Pr{||f0(Rt(X))| − E[|f0(Rt(X))|]| > (s/2)n^{2/3}} + Pr{||f0(Rt−1(X))| − E[|f0(Rt−1(X))|]| > (s/2)n^{2/3}} ≤ on(1)
776
+ Let X0 be the random variable that denotes the number of dominated vertices at the end
777
+ of the first epoch. Clearly 0 ≤ X0 ≤ |f0(Rt(X))| − |f0(Rt+1(X))| = X′0. As X′0 ≤ E[X′0] + o(n)
+ a.a.s., we get that 0 ≤ X0 ≤ E[|f0(Rt(X))|] − E[|f0(Rt+1(X))|] + o(n) a.a.s.
782
+ 14
783
+
784
+ 7
785
+ Second Epoch
786
+ The second epoch will be a slower version of the first epoch. Here a dominated vertex is chosen
787
+ uniformly at random and is removed. The process continues until there are no more dominated
788
+ vertices. Similar to the proof of [3], our strategy shall be to show that when a dominated vertex
789
+ is deleted, the expected number of newly created dominated vertices is strictly less than 1, so
790
+ that within o(n) steps, the strong collapse process comes to a halt. Thus the size of the core will
791
+ be – up to a o(n)-factor – the number of vertices remaining after the first epoch.
792
+ Suppose that after t pruning phases the first epoch ends and the second epoch begins. Also, let Yi be
793
+ the random variable that denotes number of newly generated dominated vertices solely by the
794
+ deletion of the dominated vertex at the i-th step of the second epoch. Note that Yi ∈ {0, · · · , n}.
795
+ First we try to calculate Pr{Yi = 1}.
796
+ Lemma 19. For any i ≤ (1/12)n(1 − γ)(1 − cγ), we have E[Yi] ≤ 1 − (3/4)(1 − cγ) + on(1) < 1 + on(1).
799
+ Proof of Lemma 19. We shall say that a vertex is affected by ith collapse in the second epoch if
800
+ its degree is changed by that collapsing step. Define Qi be the subset of the event {Yi = 1} that
801
+ the newly generated vertex by the ith collapse is affected for the first time. Clearly the event
802
+ {{Yi = 1} ∩ Qi} represents the fact that only one vertex, say v, is newly generated by the ith
803
+ collapse (of vertex u) and v is affected for the first time. Thus that particular vertex retains the
804
+ local structure since the first epoch. The idea here is that the edge {u, v} can be attached to
805
+ any of the possible places after the first epoch ends. We are only calculating the probability
806
+ that it is attached to a suitable vertex of Rt(X) \ {{u, v}, {u}}. To calculate Pr{{Yi = 1} ∩ Qi}
+ we shall further partition it into two events. To this end, define P to be the event that deg(v) = 2
+ after i − 1 steps. Thus the probability Pr{{Yi = 1} ∩ Qi ∩ P} is the ratio of the number of
+ degree-one vertices in Rt(X) \ {{u, v}, {u}} to the number of non-isolated vertices after t − 1
+ phases of the first epoch, as done in eq. 8 of [3]. It can be shown, by using arguments similar to
+ those in Section 6, that both these quantities are concentrated around their mean. These two quantities
812
+ are, respectively, equal to c(1 − γt)γt+1n + o(n) and (1 − γt)n + o(n) a.a.s.
813
+ For the event {Yi = 1} ∩ Qi ∩ ¬P to occur, v must be a part of a 2-simplex. Now we shall
814
+ bound the number of 2-simplices remaining after the first epoch. Observe that during the
815
+ collapsing phases the number of 2-simplices can only decrease. Let us define the random variables
816
+ T := |f2(X)| and T ′ := |f2(Rt(X))| for X ∼ X(n, c/n). Clearly T ′ ≤ T. From Markov’s
817
+ inequality we get that Pr{T > log(n)} ≤ O(1/ log(n)). Thus, Pr{T ′ > log(n)} ≤ O(1/ log(n)).
818
+ So Pr{{Yi = 1} ∩ Qi ∩ ¬P} ≤ O(log(n))/((1 − γt)n) a.a.s. Thus, a.a.s. Pr{{Yi = 1} ∩ Qi} ≤
+ c(1 − γt)γt+1 n/((1 − γt)n) + O(log(n))/((1 − γt)n) ≤ c(1 − γt)γt+1/(1 − γt) + on(1).
824
+ Now we need to calculate Pr{{Yi = 1} ∩ ¬Qi}. To do this we shall again partition this event
825
+ into two disjoint events. Let Ai denote the number of affected vertices at ith step of the second
826
+ epoch. Now define the event Bi := ∩_{j=1}^i {Aj ≤ 3}. So, Pr{{Yi = 1} ∩ ¬Qi ∩ Bi−1} ≤ 3(i − 1)/(n(1 − γt)).
+ To calculate Pr{{Yi = 1} ∩ ¬Qi ∩ ¬Bi−1} first observe that Pr{{Yi = 1} ∩ ¬Qi ∩ ¬Bi−1} ≤
+ Pr{¬Bi−1} ≤ Σ_{j=1}^{i−1} Pr{Aj ≥ 4}. But for {Ai ≥ 4} to happen the corresponding dominated vertex u
833
+ must be a part of the following arrangement.
834
+ [Diagram: the arrangement on vertices a, u, ci, cj, ck.]
839
+ Let S and S′ denote the number of such arrangements in X and Rt(X), respectively, for
840
+ X ∼ X(n, c/n). Clearly, S′ ≤ S. From Markov’s inequality we get Pr{S ≥ 1} ≤ O(1/n^2). Thus,
+ Pr{S′ ≥ 1} ≤ O(1/n^2). Hence, a.a.s. {¬Bi} never happens.
842
+ 15
843
+
844
+ A similar argument combined with Lemma 12 gives that Pr{Yi ∈ {2, · · · , n}} ≤ O(1/n^2).
845
+ By collecting all the terms we have the following inequality.
846
+ E[Yi] = Σ_{j=0}^n j · Pr{Yi = j}
+ ≤ Pr{Yi = 1} + n · O(1/n^2)
+ ≤ Pr{{Yi = 1} ∩ Qi} + Pr{{Yi = 1} ∩ ¬Qi} + n · O(1/n^2)
+ ≤ Pr{{Yi = 1} ∩ Qi} + Pr{{Yi = 1} ∩ ¬Qi ∩ Bi−1} + Pr{{Yi = 1} ∩ ¬Qi ∩ ¬Bi−1} + n · O(1/n^2)
+ ≤ c(1 − γt)γt+1/(1 − γt) + 3(i − 1)/(n(1 − γt)) + O(1/n) + on(1)
+ ≤ cγt+1 + 3(i − 1)/(n(1 − γ)) + on(1)
+ ≤ cγ + ϵi + on(1)
+ ≤ cγ + (1/4)(1 − cγ) + on(1)
+ ≤ 1 − (3/4)(1 − cγ) + on(1) < 1 + on(1), by Lemma 5.
862
+ Note that for any fixed i, ϵi = on(1) and for c > 1 we have cγ < 1.
863
+ Let Xi be the number of dominated vertices at the end of the ith step of the second epoch.
864
+ Then we have
865
+ Xi = Xi−1 − 1 + Yi = X0 − i + Σ_{j=1}^{i} Yj
867
+ until the second epoch ends. If the second epoch stops at the i′th step then Xj = 0 ∀j > i′.
869
+ Thus we have
870
+ E[X0] ≤ E[|f0(Rt(X))|] − E[|f0(Rt+1(X))|]
871
+ = (1 − γt+1 − cγt + cγt^2)n − (1 − γt+2 − cγt+1 + cγt+1^2)n
874
+ and,
875
+ E[Xi] = E[Xi−1] − 1 + E[Yi] = E[X0] − i + Σ_{j=1}^{i} E[Yj]
877
+ as long as the second epoch continues.
878
+ Now let us define δ ≡ δ(t) := (1 − γt+1 − cγt + cγt^2) − (1 − γt+2 − cγt+1 + cγt+1^2) so that
881
+ E[X0] ≤ nδ.
882
+ Now we present the main lemmas of this section. The first lemma below shows that with
883
+ high probability, for any sufficiently small ε > 0, we can choose a sufficiently large t, such that
884
+ the number of vertices deleted in the second epoch is less than εn. The proof is by modelling
885
+ the number of remaining dominated vertices after i steps of the epoch, as a biased random walk.
886
+ Lemma 20. ∀ 0 < ϵ < min{(1/12)(1 − γ)(1 − cγ), (5/192)(1 − γ)(1 − cγ)^2}, ∃T such that ∀t > T, a.a.s.
+ at most ϵn vertices will be deleted from Rt(X) before the algorithm reaches the core.
892
+ Proof of Lemma 20. Choose t such that δ ≡ δ(t) < (1/8)ϵ(1 − cγ). This can be done because as t
+ increases, γt+1 − γt and γt+2 − γt+1 approach zero.
895
+ Now suppose that the second epoch runs for ρ = ϵn steps. We shall show that E[Xρ] = 0
896
+ a.a.s., i.e., there are no more dominated vertices left to be deleted. To this end we define a sequence
+ of new random variables {Zi} as follows:
898
+ Z0 := X0
899
+ 16
900
+
901
+ and,
902
+ Zi := Zi−1 − 1 + Yi = Z0 − i + Σ_{j=1}^{i} Yj
904
+ Note that Xi ≤ Zi and Zi ≤ 0 =⇒ Xi = 0.
905
+ As ρ = ϵn ≤ (1/4)n(1 − γ)(1 − cγ), at the end of the ρ steps the expected number of dominated vertices
+ remaining satisfies
+ E[Xρ] ≤ E[Zρ] = E[Z0] − Σ_{i=1}^{ρ} 1 + Σ_{i=1}^{ρ} E[Yi]
+ ≤ nδ − (3/4)ρ(1 − cγ)    (by Lemma 19)
+ ≤ (1/8)nϵ(1 − cγ) − (3/4)nϵ(1 − cγ)
+ ≤ −(5/96)(1 − γ)(1 − cγ)^2 n ≤ 0
919
+ Hence E[Xρ] = 0.
920
+ Now we shall prove the concentration. Let us define l := (5/96)(1 − γ)(1 − cγ)^2. Next we
923
+ proceed to show
924
+ Pr{Zρ > 0} ≤ Pr{Zρ − E[Zρ] > l · n} < on(1)
925
+ Write Zρ as Zρ ≡ Z0 + Z′ρ(Y1, · · · , Yρ). First observe that, from Lemma 18, Pr{Z0 − E[Z0] >
+ (l/2)n} ≤ Pr{|Z0 − E[Z0]| > (l/2)n} ≤ on(1).
928
+ From the main geometric lemma we get
929
+ E[e^{t(Yi−E[Yi])}] ≤ E[e^{tYi}] ≤ 1 + e^t + O((e^{2t}/n^2) · (1 − (e^t/n^3)^{n−1})/(1 − e^t/n^3))
933
+ Fix t > 0. Then,
934
+ Pr{Z′ρ − E[Z′ρ] > (l/2) · n} = Pr{Σ_{i=1}^{ρ} Yi − Σ_{i=1}^{ρ} E[Yi] > (l/2) · n}
+ = Pr{e^{t(Σ_i Yi − Σ_i E[Yi])} > e^{tn(l/2)}}
+ ≤ E[e^{t(Σ_i Yi − Σ_i E[Yi])}]/e^{tn(l/2)}
+ ≤ (1 + e^t + O((e^{2t}/n^2) · (1 − (e^t/n^3)^{n−1})/(1 − e^t/n^3)))^ρ / e^{tn(l/2)}
951
+ setting t = log(n) we get
952
+ Pr{Z′ρ − E[Z′ρ] > (l/2) · n} ≤ (1 + n + O((1 − (1/n^2)^{n−1})/(1 − 1/n^2)))^ρ / n^{n(l/2)}
+ ≤ (1 + n + O(1/(1 − 1/n^2)))^{ϵn} / n^{n(l/2)}
+ ≤ ((O(1) + n)^ϵ)^n / (n^{(l/2)})^n
+ ≤ ((O(1) + n)^ϵ / n^{(l/2)})^n ≤ on(1)
969
+ As ϵ < l/2, the quantity approaches zero as n increases, thus the last inequality follows. So,
970
+ Pr{Zρ − E[Zρ] > l · n} ≤ Pr{Z0 − E[Z0] > (l/2) · n} + Pr{Z′ρ − E[Z′ρ] > (l/2) · n} ≤ on(1)
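+ The drift argument above can also be seen in a toy simulation. The following sketch is purely illustrative
+ and ours: it models the second epoch as the random walk Zi = Zi−1 − 1 + Yi, with the Yi drawn (as an
+ assumption, not from the paper) as i.i.d. Bernoulli variables whose mean is below the bound of Lemma 19,
+ and checks that the walk dies out in roughly Z0/(1 − E[Y]) steps, i.e. in o(n) steps when Z0 = o(n).
+
+     import random
+
+     def second_epoch_walk(z0, mean_y, rng):
+         # Z_i = Z_{i-1} - 1 + Y_i with E[Y_i] = mean_y < 1; return the hitting time of 0.
+         z, steps = z0, 0
+         while z > 0:
+             z += -1 + (1 if rng.random() < mean_y else 0)
+             steps += 1
+         return steps
+
+     if __name__ == "__main__":
+         rng = random.Random(1)
+         z0, mean_y = 500, 0.7          # drift of -(1 - mean_y) = -0.3 per step
+         runs = [second_epoch_walk(z0, mean_y, rng) for _ in range(20)]
+         print("mean stopping time ~", sum(runs) / len(runs),
+               " (vs z0/(1-mean_y) =", z0 / (1 - mean_y), ")")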
973
+ 17
974
+
975
+ The next lemma follows from properties of γt and the concentration bounds presented in
976
+ Section 6.
977
+ Lemma 21. ∀ δ > 0, ∃T such that ∀t > T, a.a.s.
979
+ (1 − γ)(1 − cγ)n + o(n) ≤ |f0(Rt(X))| ≤ (1 − γ)(1 − cγ)n + δn + o(n).
980
+ Proof of Lemma 21. From Theorem 6 and Theorem 11 it can be shown that a.a.s.
981
+ |f0(Rt(X))| = (1 − γt+1 − cγt + cγt^2)n + o(n)
983
+ Define h(x) := 1 − e−c(1−x) − cx(1 − x) on the interval (0, 1). It can be checked that on this
984
+ interval h′(x) < 0, i.e., h(x) is strictly decreasing. Thus we have that (1 − γt+2 − cγt+1 + cγt+1^2) ≤
+ (1 − γt+1 − cγt + cγt^2). Hence the left inequality follows.
988
+ Note that {γt}t is a monotonically increasing sequence that converges to γ. Therefore,
989
+ {(1 − γt+1 − cγt + cγt^2)}t is a monotonically decreasing sequence that converges to (1 − γ − cγ + cγ^2).
+ Thus, by choosing sufficiently large T, we can restrict {(1 − γt+1 − cγt + cγt^2)}t>T inside a δ-ball
993
+ around (1 − γ − cγ + cγ^2) for any δ > 0. Hence the right inequality follows.
994
+ Using the above two lemmas we have the proof of our first main result, Theorem 1, about the size
+ of the core (after strong collapse) of an ER complex.
996
+ Proof of Theorem 1. By Lemma 20 and Lemma 21.
997
+ 8
998
+ End Range Phase Transition
999
+ Let P(X) denote the number of all possible dominated-dominating pairs. It can be shown that
+ for X ∼ X(n, p), E[P(X)] = (n(n − 1)/2) p (1 − p(1 − p))^{n−2}. For p = λ log(n)/n, where λ > 1,
+ E[P(X)] = O(n log(n)/n^λ) = on(1). Thus, by Markov’s inequality, a.a.s. there is no dominated
1006
+ vertex to start the collapsing procedure.
1007
+ Now we shall focus on the behavior of X ∼ X(n, p) where p = 1 − λ log(n)/n. For λ > 2,
+ E[P(X)] = O(n^2/n^λ) = on(1). Thus a.a.s. there is no dominated vertex to start the collapsing
1012
+ procedure. But the situation is quite opposite when λ < 1 as the following lemma claims.
1013
+ Lemma 22. For X ∼ X(n, 1 − λ log(n)/n) and λ < 1, a.a.s. X is collapsible.
1016
+ Proof of Lemma 22. We shall show that, in this range, a.a.s. there exists a vertex adjacent to
+ every other vertex. Let us define the random variable V ∈ {0, · · · , n} that counts the number of
+ vertices that are adjacent to all other vertices. Clearly E[V ] = np^{n−1} = n(1 − λ log(n)/n)^{n−1} = Θ(n/n^λ).
1022
+ Now we shall calculate V ar(V ). Let Ii denote the indicator random variable that vi is adjacent
1023
+ to all other vertices. Then,
1024
+ Var(V ) = n Var(I1) + n(n − 1)cov(I1, I2)
+ = np^{n−1}(1 − p^{n−1}) + n(n − 1)(p^{2n−3} − p^{2n−2})
1026
+ Thus,
1027
+ Pr{V = 0} ≤ Var(V )/(E[V ])^2
+ ≤ (np^{n−1}(1 − p^{n−1}) + n(n − 1)(p^{2n−3} − p^{2n−2}))/(n^2 p^{2n−2})
+ ≤ 1/(np^{n−1}) + (1/p − 1)
+ ≤ Θ(n^λ/n) + λ log(n)/(n − λ log(n)) = on(1)
1036
+ 18
1037
+
1038
+ Thus V ≥ 1 a.a.s.
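+ The two regimes of this section are easy to probe numerically. The following sketch (our own illustration;
+ function names and the particular choices of n and λ are ours) evaluates E[P(X)] and E[V] from the formulas
+ above, showing that for p = λ log(n)/n with λ > 1 there is typically no dominated vertex to start with, while
+ for p = 1 − λ log(n)/n with λ < 1 a universally adjacent vertex exists and the complex collapses.
+
+     import math
+
+     def expected_pairs(n, p):
+         # E[P(X)] = (n(n-1)/2) * p * (1 - p(1-p))^(n-2)
+         return n * (n - 1) / 2 * p * (1 - p * (1 - p)) ** (n - 2)
+
+     def expected_universal_vertices(n, p):
+         # E[V] = n * p^(n-1): vertices adjacent to every other vertex.
+         return n * p ** (n - 1)
+
+     if __name__ == "__main__":
+         n = 10_000
+         p_sparse = 1.5 * math.log(n) / n        # lambda = 1.5 > 1
+         p_dense = 1 - 0.5 * math.log(n) / n     # lambda = 0.5 < 1
+         print("sparse regime:  E[P(X)] =", expected_pairs(n, p_sparse))
+         print("dense regime :  E[V]    =", expected_universal_vertices(n, p_dense))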
1039
+ References
1040
+ [1] N. Alon and J. Spencer. The Probabilistic Method. Wiley, New York, 3rd edition, 2008.
1041
+ [2] Lior Aronshtam and Nathan Linial. When does the top homology of a random simplicial
1042
+ complex vanish? Random Struct. Algorithms, 46(1):26–35, 2015. doi:10.1002/rsa.20495.
1043
+ [3] Lior Aronshtam and Nathan Linial. The threshold for d-collapsibility in random complexes.
1044
+ Random Struct. Algorithms, 48(2):260–269, 2016. doi:10.1002/rsa.20585.
1045
+ [4] Lior Aronshtam, Nathan Linial, Tomasz Luczak, and Roy Meshulam. Collapsibility and
1046
+ vanishing of top homology in random simplicial complexes.
1047
+ Discret. Comput. Geom.,
1048
+ 49(2):317–334, 2013. doi:10.1007/s00454-012-9483-8.
1049
+ [5] Dominique Attali, Andr´e Lieutier, and David Salinas. Vietoris-rips complexes also provide
1050
+ topologically correct reconstructions of sampled shapes. Computational Geometry, 46(4):448–
1051
+ 465, 2013.
1052
+ [6] J. A. Barmak and E. G. Minian. Strong homotopy types, nerves and collapses. Discrete
1053
+ and Computational Geometry, 47:301–328, 2012.
1054
+ [7] J-D. Boissonnat and S. Pritam. Computing persistent homology of flag complexes via strong
1055
+ collapses. International Symposium on Computational Geometry (SoCG), 2019.
1056
+ [8] J-D. Boissonnat and S. Pritam. Edge collapse and persistence of flag complexes. International
1057
+ Symposium on Computational Geometry (SoCG), 2020.
1058
+ [9] J-D. Boissonnat, S.Pritam, and D. Pareek. Strong Collapse for Persistence. In 26th Annual
1059
+ European Symposium on Algorithms (ESA 2018), volume 112, 2018.
1060
+ [10] B´ela Bollob´as. Random graphs. Number 73 in Cambridge studies in advanced mathematics.
1061
+ Cambridge University Press, 2 edition, 2001.
1062
+ [11] David A. Freedman. On Tail Probabilities for Martingales. The Annals of Probability,
1063
+ 3(1):100 – 118, 1975.
1064
+ [12] Marc Glisse and Siddharth Pritam. Swap, Shift and Trim to Edge Collapse a Filtration.
1065
+ In 38th International Symposium on Computational Geometry (SoCG 2022), volume 224,
1066
+ pages 44:1–44:15, 2022.
1067
+ [13] A. Hatcher. Algebraic Topology. Univ. Press Cambridge, 2001.
1068
+ [14] Matthew Kahle. Random simplicial complexes, 2016. arXiv:1607.07069.
1069
+ [15] DMITRY N. KOZLOV. The threshold function for vanishing of the top homology group of
1070
+ random d-complexes. Proceedings of the American Mathematical Society, 138(12):4517–4527,
1071
+ 2010. URL: http://www.jstor.org/stable/41059187.
1072
+ [16] Nathan Linial and Roy Meshulam.
1073
+ Homological connectivity of random 2-complexes.
1074
+ Comb., 26(4):475–487, 2006. URL: https://doi.org/10.1007/s00493-006-0027-9, doi:
1075
+ 10.1007/s00493-006-0027-9.
1076
+ [17] Nathan Linial and Yuval Peled. Random simplicial complexes: around the phase transition.
1077
+ A Journey Through Discrete Mathematics, pages 543–570, 2017.
1078
+ 19
1079
+
1080
+ [18] Greg Malen.
1081
+ Collapsibility of random clique complexes.
1082
+ Discrete Mathematics,
1083
+ 346(3):113267, 2023.
1084
+ URL: https://www.sciencedirect.com/science/article/pii/
1085
+ S0012365X22004733, doi:https://doi.org/10.1016/j.disc.2022.113267.
1086
+ [19] Roy Meshulam and N. Wallach. Homological connectivity of random k-dimensional com-
1087
+ plexes. Random Struct. Algorithms, 34(3):408–417, 2009. URL: https://doi.org/10.
1088
+ 1002/rsa.20238, doi:10.1002/rsa.20238.
1089
+ [20] LUTZ WARNKE. On the method of typical bounded differences. Combinatorics, Probability
1090
+ and Computing, 25(2):269–299, 2016. doi:10.1017/S0963548315000103.
1091
+ [21] J. H. C Whitehead. Simplicial spaces nuclei and m-groups. Proc. London Math. Soc,
1092
+ 45:243–327, 1939.
1093
+ [22] A. C. Wilkerson, H. Chintakunta, and H. Krim. Computing persistent features in big data:
1094
+ A distributed dimension reduction approach. In International Conference on Acoustics,
1095
+ Speech, and Signal Processing (ICASSP), pages 11–15, 2014.
1096
+ [23] Siddharth Pritam ´Angel Javier Alonso, Michael Kerber. Filtration-Domination in Bifiltered
1097
+ Graphs. In SIAM Symposium on Algorithm Engineering and Experiments (ALENEX23),
1098
+ 2023.
1099
+ 20
1100
+
29E1T4oBgHgl3EQf5wUG/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
2dAyT4oBgHgl3EQfbvf7/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:514ba34315e24cbfbfddb00e6973c979672cc130a357ace867b547030c9c1bae
3
+ size 14876717
2dFAT4oBgHgl3EQfDhxB/content/tmp_files/2301.08416v1.pdf.txt ADDED
@@ -0,0 +1,762 @@
1
+ Machine Translation for Accessible Multi-Language Text Analysis
2
+ Edward W. Chew1, William D. Weisman1, Jingying Huang1, Seth Frey1,*
3
+ 1 Department of Communication, University of California Davis, Davis, CA, USA
4
+ * Correspondence: Seth Frey ([email protected]) 376 Kerr Hall, 1 Shields Dr., Davis,
5
+ CA 95616, USA
6
+
7
+ Machine Translation for Accessible Multi-Language Text Analysis
8
+ Abstract
9
+ English is the international standard of social research, but scholars are increasingly conscious
10
+ of their responsibility to meet the need for scholarly insight into communication processes
11
+ globally. This tension is as true in computational methods as any other area, with
12
+ revolutionary advances in the tools for English language texts leaving most other languages
13
+ far behind. In this paper, we aim to leverage those very advances to demonstrate that
14
+ multi-language analysis is currently accessible to all computational scholars. We show that
15
+ English-trained measures computed after translation to English have adequate-to-excellent
16
+ accuracy compared to source-language measures computed on original texts. We show this for
17
+ three major analytics—sentiment analysis, topic analysis, and word embeddings—over 16
18
+ languages, including Spanish, Chinese, Hindi, and Arabic. We validate this claim by
19
+ comparing predictions on original language tweets and their backtranslations: double
20
+ translations from their source language to English and back to the source language. Overall,
21
+ our results suggest that Google Translate, a simple and widely accessible tool, is effective in
22
+ preserving semantic content across languages and methods. Modern machine translation can
23
+ thus help computational scholars make more inclusive and general claims about human
24
+ communication.
25
+ Keywords: multi-language text analysis, multi-lingual text analysis, computational text
26
+ analysis, natural language processing, backtranslation, topic modeling, word embedding,
27
+ sentiment analysis.
28
+
29
+ Introduction
30
+ Humans communicate in thousands of languages, and yet a single language, English,
31
+ attracts the bulk of communication research. This not only has the effect of depriving other
32
+ languages of adequate attention, but depriving English-focused scholars of any sense of where
33
+ the language stands relative to others. The general use of English-trained tools for
34
+ English-focused analyses in the social science community is particularly notable given the
35
+ ubiquity of multi-lingual data and the power of modern computational natural language
36
+ processing. For example, social media researchers on Twitter typically begin with raw data
37
+ that is highly multilingual, before filtering out all tweets except for English, or some other
38
+ single language. With such practices scholars miss a tremendous opportunity to test the
39
+ generalizability of social media-observed big data claims. But bringing standard text analysis
40
+ tools to the level of training and refinement that English-trained tools receive is a forbidding
41
+ prospect that few multi-lingual scholars have the training, resources, and language
42
+ background to pursue. We propose a simple alternative approach that makes texts from over
43
+ 100 languages accessible to the full variety of analyses that are typically available to only
44
+ English-focused scholars. Specifically we demonstrate that modern machine translation has
45
+ reached a level of refinement necessary to preserve sentiment, lexical topics, and semantic
46
+ distance, making multi-language datasets legible to state of the art English-trained tools. By
47
+ providing a validation of state-of-the-art machine translation, along with easily adaptable
48
+ demonstration code, we aim to broaden the horizon of computational research and support
49
+ Communication scholars in increasing the relevance and generality of their work.
50
+ Google Translate, the most popular, accurate, and accessible multilingual neural
51
+ machine translation service, offers translations for over 133 languages (Caswell, 2022). In this
52
+
53
+ paper, we demonstrate the efficacy of Google Translate in retaining sentiment valence across
54
+ translations of large hand-coded and machine-coded Twitter datasets composed of tweets in
55
+ 16 global non-English languages from four language families, being of Indo-European,
56
+ Uralic, Semitic, and Sinitic origin. With our findings that Google Translate preserves the
57
+ sentiment of tweets, as well as other dimensions of semantics, scholars may be emboldened to
58
+ utilize Google Translate and other multilingual neural machine translation services to expand
59
+ the generalizability of their research. In so doing, non-English languages can benefit from
60
+ advanced English-trained natural language processing tools, and computational findings
61
+ normally restricted to the English language can be expanded upon to broaden scholars’
62
+ knowledge of global social phenomena.
63
+ Academics use Twitter datasets for a wide range of scholarship, including sentiment
64
+ analysis (e.g. Gautam & Yadav, 2014), algorithmic training (e.g. Braithwaite et al., 2016), and
65
+ even coronavirus disease 2019 detection (e.g. Gharavi et al., 2020). English-language corpora
66
+ have been used to predict election results (Nausheen & Begum, 2018), analyze consumer
67
+ preferences (Ahmed & Danti, 2016), and explore pro- and anti-childhood vaccine
68
+ communities’ influence on Twitter (Featherstone et al., 2020).
69
+ As valuable as this work is, it can only be more valuable extended across languages.
70
+ Frey et al. (2018) use a corpus of six languages to document general ripple effects of
71
+ emotional influence through others and back around to the self. Mocanu et al. (2013) use data
72
+ on 78 languages to characterize inter-linguistic diversity and intra-linguistic drift across
73
+ municipalities and regions. And Alshaabi et al. (2021) compare the dynamics of social
74
+ influence on Twitter over 150 languages. In other disciplines, large-scale multi-language
75
+ comparisons in other disciplines have identified universal patterns in the cross-language
76
+
77
+ naming of colors (Lindsey & Brown, 2009), as well as a universal preference for the shortening
78
+ of dependency length in human sentence production (Futrell et al., 2015).
79
+ Our research demonstrates the effectiveness of Google Translate on the maintenance
80
+ of sentiments, topic clusters, and semantic distance for tweets in all languages we examine.
81
+ We validate the approach using “backtranslation,” a classic validation method of machine
82
+ translation in which scholars compare an original text to a version of that text that has been
83
+ translated from its original language to another language (in our case English) and then back
84
+ again to the original (Figure 1). This makes it possible to directly compare the accuracy of
85
+ English-trained tools on English translations to original-language-trained tools on
86
+ original-language texts, while controlling for semantic drift introduced by the translation
87
+ process itself. We first test the preservation of sentiments using two large public multi-lingual
88
+ Twitter datasets, one with hand-coded sentiments (Mozetič et al., 2016) and another with
89
+ machine-coded sentiments (Imran et al., 2022) for this analysis. The second portion of our
90
+ research applies the same two datasets to show that Google Translate preserves topic clusters
91
+ after backtranslation, thereby demonstrating a similar level of semantic conservation for this
92
+ second common text analysis task. In the third and final portion of our present study we
93
+ demonstrate the effectiveness of out-of-the-box machine translation on a third common text
94
+ analysis approach: neural word embeddings. Here, after backtranslation, 10 of 16 languages
95
+ examined performed better than chance in maintaining a minimal embedding distance. Our
96
+ findings provide strong support for the use of modern machine translation for expanding
97
+ academic attention to the languages spoken by most humans.
98
+ Method
99
+
100
+ Datasets
101
+ We utilized two large, multilingual Twitter datasets. First, we examine the Mozetič et
102
+ al. (2016) dataset, which contains over 1.6 million general Twitter posts hand-labeled as
103
+ “positive”, “negative”, or “neutral” for 15 European languages: Albanian,
104
+ Bosnian, Bulgarian, Croatian, English, German, Hungarian, Polish, Portuguese, Russian,
105
+ Serbian, Slovak, Slovenian, Spanish, and Swedish. To expand the scope of our research
106
+ beyond European languages, we added tweets from the Imran et al. (2022) COVID-19 dataset,
107
+ a larger (70 million tweet) corpus including tweets in Chinese, Hindi, and Arabic. While these
108
+ two datasets are comparable (both include sentiment labels), they differ in subject and date, as
109
+ well as in how they determined sentiment scores. Those in Mozetič et al. (2016) were applied
110
+ by human language-domain experts, while those from the Imran et al. (2022) dataset were
111
+ determined by algorithms (all trained within-language).
112
+ Data cleaning
113
+ Before translation and subsequent analysis, we preprocessed all Twitter data to remove
114
+ Twitter handles, return handles, URLs, numbers, and empty tweets, and to convert all content to
115
+ lowercase. We dropped Serbian from our analysis halfway through the study, after discovering
116
+ that the Mozetič dataset contains Cyrillic Serbian, but Google Translate only supports
117
+ Latin-character Serbian. We obviously excluded all English language tweets from validation
118
+ by backtranslation through English.
119
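+ As a concrete illustration of these cleaning steps, the following Python sketch shows one
+ possible implementation; the regular expressions and the function name are illustrative
+ assumptions rather than the authors' released code (see https://osf.io/jx476/).
+ import re
+ def clean_tweet(text):
+     text = re.sub(r"@\w+", " ", text)                    # Twitter handles and return handles
+     text = re.sub(r"https?://\S+|www\.\S+", " ", text)   # URLs
+     text = re.sub(r"\d+", " ", text)                     # numbers
+     return text.lower().strip()                          # lowercase; empty results are dropped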
+ To reduce our dataset to a more manageable, and affordable, size (the Google
120
+ Translate API is paid), we randomly sampled 30,000 tweets from each of the 13 applicable
121
+
122
+ European languages from Mozetič et al. (2016) dataset, and 10,000 tweets from Chinese,
123
+ Hindi, and Arabic from the Imran et al. (2022) dataset, for a total of 16 languages.
124
+ Translation process
125
+ Utilizing the Google Translate API, we translate all tweets from their “original
126
+ language” datasets into English, saving the results as our “English translated” dataset. We then
127
+ translate all the English translated tweets back to their original language, saving it as our
128
+ “backtranslated” dataset (Figure 1). Our results are based only partly on three-way
129
+ comparisons between these datasets. Where there is not a meaningful correspondence between
130
+ English- and original-language analyses we use only two-way comparisons between the
131
+ original and backtranslated datasets.
132
+ With this manuscript we share the scripts and instructions, to enable researchers to
133
+ easily extend their single-language corpus research to multiple languages. The code is
134
+ available at https://osf.io/jx476/.
135
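+ To illustrate the round trip, a minimal Python sketch using the google-cloud-translate v2
+ client is shown below; the function name is an assumption, and the authors' actual scripts are
+ the ones shared at https://osf.io/jx476/.
+ from google.cloud import translate_v2 as translate
+ client = translate.Client()
+ def backtranslate(texts, source_lang):
+     # original language -> English (the "English translated" dataset)
+     to_en = [r["translatedText"] for r in client.translate(texts, source_language=source_lang, target_language="en")]
+     # English -> original language (the "backtranslated" dataset)
+     back = [r["translatedText"] for r in client.translate(to_en, source_language="en", target_language=source_lang)]
+     return to_en, back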
+ Sentiment analysis
136
+ We conduct our sentiment analysis task with the free open-source software Polyglot
137
+ (Al-Rfou, 2015). Polyglot allows the generation of sentiment labels in more than 100
138
+ languages, with “-1” indicating negative sentiment, “0” indicating neutral sentiment, and “1”
139
+ indicating positive sentiment for each word in each original-language tweet. Based on the
140
+ difference between the number of positive sentiment words and negative sentiment words, we
141
+ generate an overall polarity for each tweet. Polyglot’s lexicon-based sentiment analysis relies
142
+ on a valence dictionary of positive and negative words, computing the sentiment of a text as
143
+
144
+ the simple sum of the valences of its words, normalized back down to the [-1, 1] interval. Our
145
+ pipeline excluded neutrally labeled tweets: as a result of Polyglot’s lexicon-based sentiments,
146
+ short texts like Twitter posts are overwhelmingly labeled as neutral which makes it difficult to
147
+ distinguish the performance of sentiment analyses across translations.
148
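+ A minimal sketch of this word-level scoring and normalization is given below; it assumes
+ Polyglot's Text and Word interfaces and reads the normalization literally from the description
+ above, so it may differ in detail from the authors' pipeline.
+ from polyglot.text import Text
+ def tweet_polarity(tweet, lang_code):
+     words = Text(tweet, hint_language_code=lang_code).words
+     valences = [w.polarity for w in words]               # each scored word contributes -1, 0, or +1
+     pos, neg = valences.count(1), valences.count(-1)
+     if pos + neg == 0:
+         return 0.0                                       # neutral; such tweets are excluded downstream
+     return (pos - neg) / (pos + neg)                     # normalized to the [-1, 1] interval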
+ We computed confidence intervals around the accuracy of each language’s sentiments
149
+ with bootstrapping (1000x). The final sentiment accuracies are the bootstrapped medians.
150
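+ A sketch of this bootstrap, assuming a binary per-tweet correctness vector:
+ import numpy as np
+ def bootstrap_accuracy(correct, n_boot=1000, seed=0):
+     rng = np.random.default_rng(seed)
+     correct = np.asarray(correct, dtype=float)           # 1.0 where the predicted label matches the gold label
+     medians = [rng.choice(correct, size=correct.size, replace=True).mean() for _ in range(n_boot)]
+     return np.median(medians), np.percentile(medians, [0.5, 99.5])   # bootstrapped median and 99% interval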
+ Topic clustering
151
+ While sentiment analysis is a common application for natural language tools, it only
152
+ serves to answer a small range of questions. We expand our investigation of Google
153
+ Translate’s ability to preserve the content of translated text through topic analysis. Compared
154
+ to sentiment analysis, topic analysis provides a more technical, but much more flexible
155
+ approach to computationally representing the meanings of text. Although the process of topic
156
+ analysis is language agnostic, common computational tools are typically built to only support
157
+ the English language, from stopwords to supported character sets.
158
+ We model our topic clustering approach after Yin and Wang (2014) who present the
159
+ open-source software GSDMM (“Gibbs Sampling algorithm of the Dirichlet Multinomial
160
+ Mixture). GSDMM is trained upon short text posts such as those found within social media
161
+ environments such as Twitter (Yin & Wang, 2014). We follow the original work’s data
162
+ cleaning steps of removing both emojis and stopwords. We excluded Albanian and Bosnian
163
+ due to their incompatibility with our data cleaning dependencies.
164
+
165
+ Our cluster analysis process was as follows. For each language, we used a total of five
166
+ iterations of the clustering algorithm. We then classified the backtranslated tweets to the
167
+ clusters generated on the original language tweets. To estimate the success of machine
168
+ translation at semantic preservation under topic analysis, we computed the proportion of
169
+ backtranslated tweet that were correctly assigned to the cluster of their original-language
170
+ version. We compare these proportions to the baseline “null” proportions expected by chance,
171
+ as derived from random permutations of original cluster assignments. Like many clustering
172
+ algorithms, GSDMM requires researchers to impose a desired number of clusters, rather than
173
+ identifying the number of clusters through the same emergent process as cluster assignments.
174
+ But the ability of backtranslation to preserve topic clusters depends on the number of clusters.
175
+ Therefore, we observe the effectiveness of topic preservation across a range of clusterings by
176
+ training models on each original language dataset for 2, 5, 10, 15, 20, 50, 100, 150, and 200
177
+ clusters.
178
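+ The clustering and cluster-assignment comparison can be sketched with the open-source gsdmm
+ implementation as follows; the alpha and beta values are placeholders rather than the study's
+ settings, and the permutation baseline mirrors the chance comparison described above.
+ import random
+ from gsdmm import MovieGroupProcess
+ def cluster_agreement(original_docs, backtranslated_docs, n_clusters):
+     # each doc is a list of tokens with emojis and stopwords already removed
+     vocab_size = len({w for doc in original_docs for w in doc})
+     mgp = MovieGroupProcess(K=n_clusters, alpha=0.1, beta=0.1, n_iters=5)
+     original_labels = mgp.fit(original_docs, vocab_size)
+     back_labels = [mgp.choose_best_label(doc)[0] for doc in backtranslated_docs]
+     agreement = sum(o == b for o, b in zip(original_labels, back_labels)) / len(original_labels)
+     permuted = random.sample(original_labels, len(original_labels))          # chance baseline
+     chance = sum(o == p for o, p in zip(original_labels, permuted)) / len(original_labels)
+     return agreement, chance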
+ Unlike with our evaluation of sentiment analysis, the analysis of topics is only able to
179
+ compare original language and backtranslated analytics: it is not able to compare either to
180
+ English. While the framework of sentiment analysis imposes the same meaning to the idea of
181
+ “positive” and “negative” sentiments across languages, topics emerge from a narrow
182
+ understanding of a word as the sequence of characters that constitutes it. To the extent that
183
+ original language and backtranslated tweets use the same characters (as in languages’
184
+ borrowings from each other), they can be assigned to the same set of clusters. But lexicons in
185
+ English and each original language are mostly non-overlapping, and there is ultimately no
186
+ basis to map English translations to original-language-derived topics.
187
+ Word embedding
188
+
189
+ Polyglot (Al-Rfou, 2015) also supports semantic word embeddings across its
190
+ languages. We determine semantic preservation under word embedding by embedding
191
+ original and backtranslated tweets (as the normalized sum of the embeddings of their words)
192
+ and calculating their (cosine) distance in the embedding’s high-dimensional semantic space.
193
+ Under this formalism, a distance of zero indicates perfect preservation of semantics after
194
+ translation. Because Polyglot has a different semantic space for each language, it was not
195
+ possible to compare the distance of the intermediate English texts to the original and
196
+ backtranslated texts.
197
+ To measure how well machine translation preserves semantics under word embedding,
198
+ we compared the embedding distances after backtranslation to two baseline distances. We
199
+ computed the average minimum distance of tweets from 5,000 other tweets in that language
200
+ and the average of their mean distances from those tweets. Our rationale is that meaning is preserved despite
201
+ semantic drift imposed by the process of (double) machine translation if the average distance
202
+ of backtranslated tweets from their originals is smaller than the average distances of different
203
+ original language tweets from each other.
204
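+ A sketch of this comparison, assuming Polyglot's Embedding interface (the embedding path is a
+ placeholder):
+ import numpy as np
+ from polyglot.mapping import Embedding
+ def tweet_vector(words, embedding):
+     vectors = [v for v in (embedding.get(w) for w in words) if v is not None]
+     if not vectors:
+         return None
+     summed = np.sum(vectors, axis=0)
+     return summed / np.linalg.norm(summed)               # normalized sum of word embeddings
+ def cosine_distance(u, v):
+     return 1.0 - float(np.dot(u, v))                     # u and v are already unit length
+ # embedding = Embedding.load("path/to/polyglot/embeddings_pkl.tar.bz2")  # path is a placeholder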
+ Results
205
+ Our primary finding is that Google Translate is faithful enough to preserve the
206
+ semantics of multilingual texts under three common text analysis tasks: sentiment analysis,
207
+ topic analysis, and word embeddings.
208
+ Application 1: Preservation of sentiment
209
+
210
+ We find that overall accuracy of sentiment scores decreases less than 1% after
211
+ backtranslation, from a median 65.17% accuracy (with a very tight 99% high-confidence
212
+ interval (HCI) of [65.16, 65.18]) to 64.76% [64.75, 64.77]. While small, this decline was
213
+ statistically significant, as measured by the separation of the 99% HCI bars. We display this
214
+ result in Figure 2, below.
215
+ We did have one surprise from this process. We expected that the accuracy of
216
+ English-trained sentiment on the English-translated tweets would be between or below the
217
+ accuracy of the original or backtranslated tweets, whose “ground truth” sentiments were
218
+ computed with models trained specifically for those languages. Instead median sentiment
219
+ accuracy increases 4.95% following original languages’ translation into English (original
220
+ language median accuracy: 65.17%, HCI [65.16, 65.18]; English translated median accuracy:
221
+ 70.12%, HCI [70.11, 70.13]). Somehow sentiments extracted from English translations are
222
+ more accurate than sentiments of original language tweets, despite the process of translation
223
+ in between (Figure 2). We speculate on this result in the Discussion section.
224
+ Looking specifically at how different languages performed, we found the expected
225
+ decrease in accuracy rates between the original language datasets and the backtranslated
226
+ datasets for Albanian, Arabic, Chinese, Slovak, and Spanish (Figure 3). Unexpectedly, the
227
+ remaining language datasets, belonging to the languages of Bosnian, Bulgarian, Croatian,
228
+ German, Hindi, Hungarian, Polish, Portuguese, Russian, Slovenian, and Swedish, experienced
229
+ an increase in sentiment accuracy from original language to backtranslated form. Although
230
+ languages on average showed higher accuracy in English translation, the original language
231
+ datasets of Portuguese, Russian, Slovenian, and Swedish show a drop in sentiment accuracy
232
+
233
+ when translated to English (while the remaining others, Albanian, Bosnian, Bulgarian,
234
+ Croatian, English, German, Hungarian, Polish, Slovak, and Spanish all improve).
235
+ Application 2: Preservation of lexical topic assignments
236
+ Our primary finding from Application 2 is that machine translation also preserves
237
+ topic structure (Figure 4). Specifically, backtranslated tweets are assigned to the same cluster
238
+ and their original language version at a rate well above chance. As expected, the ability of
239
+ backtranslation to preserve topic structure declines as the number of topics increases (Figure
240
+ 5). Performance accuracy of topic cluster preservation is 78% when there are two topic
241
+ clusters, and declines to about 60% when there are 10 or more clusters. However, chance
242
+ accuracy declines much faster, from 52% to 10% over the same span. In other words, the
243
+ relative accuracy of topic recovery actually improves with the number of clusters, even as
244
+ absolute efficacy declines.
245
+ It may seem peculiar that chance performance of topic assignment remains at 10%
246
+ even with 200 clusters. This is probably due to an unequal distribution of cluster sizes. For
247
+ 200 equally sized clusters, the baseline, null probability of a backtranslated tweet being
248
+ randomly assigned to its correct topic is 0.5%, one half of one percent. Chance performance
249
+ higher than this is easy to arrange in a system with a few large clusters and a large number of
250
+ very small ones.
251
+ Overall, we find that Google Translate preserves topic clusters across languages, with
252
+ accuracy ranging from 60% to 80% depending on the number of topics we set the GSDMM
253
+ algorithm to impose.
254
+
255
+ Application 3: Preservation of semantics in embedding space
256
+ In the final application of this work, we examine the multilingual preservation of
257
+ semantic vectors in high-dimensional neural embeddings after machine translation and
258
+ backtranslation.
259
+ On average, original language tweets are significantly closer to their backtranslations
260
+ than to other original language tweets in the same collection (Figure 6). Across languages,
261
+ average distances of original language tweets from each other are 0.203–0.492 units, their
262
+ minimum distances from each other are 0.028–0.132, and their distances from their
263
+ backtranslations are 0.041–0.184. Being less than half of the average distance (except
264
+ Chinese) and below or slightly above the minimum distance, we can conclude that the
265
+ semantic change introduced by the translation algorithm is enough to change the meaning of a
266
+ backtranslated tweet to be mistakable for a different closely related tweet, but not the typical
267
+ more distantly related tweet.
268
+ Of the 16 languages involved in the analysis, 6 languages (Albanian, Arabic, Chinese,
269
+ German, Hindi, and Portuguese) failed the minimum baseline test, with backtranslated tweets
270
+ having greater semantic distance from their originals than the average closest outside tweet.
271
+ The Albanian, German, and Portuguese corpora failed by small margins (mean distance of
272
+ 0.065 compared to minimum baseline distance 0.059 in Albanian; distance 0.110 compared to
273
+ baseline 0.094 in German; 0.089 against 0.085 in Portuguese). But in Arabic, Chinese, and
274
+ Hindi, embeddings of translations were even further from their original (distance 0.146
275
+ against 0.101 in Arabic; 0.184 against 0.053 in Chinese; 0.041 against 0.028 in Hindi). It
276
+ should be noted that Arabic, Chinese, and Hindi were drawn from the Imran et al. (2022)
277
+
278
+ dataset focused on COVID-19 related tweets, included as part of our effort to expand this
279
+ project’s analysis beyond languages of European origin. As baseline measures were calculated
280
+ on the distance between random tweets relative to their distance with all other tweets, and
281
+ these tweets were semantically more closely related, these languages’ baseline measures may
282
+ have been especially narrow relative to those of the other languages as a result of their shared
283
+ topic. Although they failed the rigorous minimum distance test, even Arabic, Chinese, and
284
+ Hindi passed the mean distance test: they were closer in meaning to their original than the
285
+ average tweet (mean baseline distances 0.416, 0.352, and 0.203, respectively).
286
+ Discussion
287
+ As the global academic world becomes increasingly interconnected, Communication
288
+ scholars must meet the challenge to make claims about communication processes more
289
+ globally relevant. Fortunately, with recent advances in natural language processing,
290
+ quantitative Communication research has an opportunity to be multilingual by default.
291
+ Advances that bring equal attention to more of the world’s languages will not only provide
292
+ greater generality of results, but greater attention to the work of Communication scholars from
293
+ all parts of the world. Standard approaches to large multilingual corpora will also allow the
294
+ rapid transfer of groundbreaking knowledge to and from the international Communication
295
+ community.
296
+ Of course, these advances have downsides. When multi-language analyses are
297
+ conducted by scholars who can’t speak all of those languages, it becomes harder for them to
298
+ “gut check” or “sanity check” their results, culturally contextualize those results, and interpret
299
+ whatever valid cross-linguistic differences do appear. By encouraging researchers to
300
+
301
+ conduct multilingual studies by default, we are almost necessarily advocating for a
302
+ circumstance in which scholars are making conclusions about languages that they do not
303
+ know. Although this approach has some acceptance in other fields, such as large-scale
304
+ comparative linguistics, it would be understandable to see it as controversial. As novel as this
305
+ problem may be, the way forward may not be novel at all. Quantitative and qualitative
306
+ methods have a fundamental complementarity, with the former bringing generality as the
307
+ latter brings depth and sensitivity to context. By supporting the summary quantitative claims
308
+ of non-speakers with citations to other work by native speakers and other domain experts,
309
+ scholars may be able to justify not knowing the languages they are engaging with. This
310
+ complementary approach will be particularly valuable for understanding outliers. In the case
311
+ of our research, Chinese, Arabic, and Hindi all perform worse than the other languages.
312
+ Having ruled out explanations that go to the phylogeny, character set, and geography of these
313
+ languages, domain experts become the best candidates for understanding how and why
314
+ specific languages deviate from the majority of their peers. This illustrates the importance of
315
+ framing our contribution as a complement to expert-based multi-language communication
316
+ research, rather than a substitute.
317
+ In this work we are able to validate the performance of Google Translate by leveraging
318
+ source-language versions of our three methods: sentiment analysis, topic analysis, and word
319
+ embeddings. However, as others make use of machine translation, they will not have the
320
+ comfort of source-language tools, and may feel that they are “flying blind”. Although we
321
+ succeed in showing that translation introduces negligible drift, it may still be uncomfortable to
322
+ apply it to a new dataset, particularly with text analysis methods beyond the three that we
323
+ validate here (such as custom classifiers). To address this concern, researchers can take not
324
+
325
+ only our conclusions, but the backtranslation method itself, to perform partial validations for
326
+ their own case. Most likely, it should be possible to find “home language” tools for at least a
327
+ handful of languages in a larger corpus. If an author can show satisfactory and stable
328
+ performance across this subset, by comparing original and backtranslated texts, they can
329
+ assure their audience that the method is probably working for other languages as well. Even
330
+ lacking such “ground truth,” there may be ways of using our method to instill confidence in a
331
+ multilingual result. For example, a scholar could perform iterative backtranslations to
332
+ calculate how many cycles must be introduced for the statistical significance of their result to
333
+ degrade below threshold. If it takes a large number of backtranslations to degrade a result,
334
+ readers can have confidence that artifacts introduced by the method are not sufficient to
335
+ explain those results. Conversely, if machine translation is artificially amplifying a result,
336
+ scholars can measure this effect with iterated backtranslation to suggest an appropriate amount
337
+ of caution.
338
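+ One hedged sketch of this iterated check, reusing the earlier backtranslate() sketch (the cycle
+ count and the return structure are illustrative):
+ def iterated_backtranslation(texts, source_lang, cycles):
+     versions = [list(texts)]
+     for _ in range(cycles):
+         _, texts = backtranslate(texts, source_lang)     # one more round trip through English
+         versions.append(texts)
+     return versions                                      # re-run the analysis of interest on each version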
+ Another design choice of this work ensures the generality of the method we introduce.
339
+ All three applications of this work were performed with tweets. Tweets are short, making
340
+ them challenging for text analysis methods like sentiment and topic analysis. That our method
341
+ is very effective on challenging text is encouraging for scholars who would extend this
342
+ method to more typical (longer) texts.
343
+ A limitation of our approach is its accessibility. We have argued that Google Translate
344
+ is very accessible, and this is true in that it requires a small amount of code (that we provide)
345
+ to translate large quantities of text to English. However, our approach is not as financially
346
+ accessible. Google’s Translation API costs $20 USD per million characters. In practice,
347
+ this translates to roughly $100 USD per 130,000 tweets. Fortunately, free translation tools of
348
+
349
+ comparable quality are increasingly common, and can also be validated in practice using
350
+ backtranslation.
351
+ One surprising result from this work was that the accuracy of sentiment detection after
352
+ translation into English, and in some cases after backtranslation, was higher than in the
353
+ original texts. This finding is easier to understand with an appreciation of how sentiment
354
+ analysis works in libraries like Polyglot. Polyglot uses the “dictionary” method, in which
355
+ hundreds to thousands of high-frequency words are given sentiment scores, and the score of a
356
+ statement is calculated from the sum of scores of the subset of words that are in the detector’s
357
+ sentiment dictionary. If the dictionary is large, or the text is long, then its assigned sentiment
358
+ score will be based on a lot of signal. Consequently, this method is less suitable for rarer
359
+ languages and shorter texts (like tweets), which are less likely to contain scored words. It is
360
+ also more suitable to texts with more common words, since uncommon words are less likely
361
+ to appear in a language’s sentiment dictionary.
362
+ Why would translation to English, or backtranslation from English, improve task
363
+ performance? Translation to English may be improving dictionary-based sentiment detection
364
+ because English-language sentiment dictionaries tend to be longer. And subsequent
365
+ backtranslation may be improving detection performance if it results in uncommon unscored
366
+ words from the source text being backtranslated into more common words that are scored. We
367
+ believe that this finding can be explained by Polyglot’s apparently greater capacity to
368
+ detect sentiment in English language content, relative to the content of other languages. This
369
+ result underscores the need to validate Google Translate for each natural language task that it
370
+ is being used to support.
371
+ Conclusion
372
+
373
+ There is an unmet need to extend Communication scholars’ applications of text
374
+ analysis to more languages, particularly in the data-rich context of social media studies.
375
+ Translation tools such as Google Translate can be immensely helpful in meeting this need. We
376
+ have quantified Google Translate’s effectiveness in maintaining sentence meaning in
377
+ translations to and from English. Across 16 non-English languages, sentiment analysis scores
378
+ were shown to improve when translated to English, and only diminish marginally when
379
+ translated back to their original languages. Similarly, both topic and semantic distances are
380
+ preserved during backtranslation. Our findings demonstrate that machine translation is able to
381
+ preserve semantic content and make non-English datasets legible to English-trained
382
+ computational tools. We hope this analysis gives researchers the confidence to use machine
383
+ translation to simply and economically increase the number of languages involved in their
384
+ research, and thereby the generality of their findings.
385
+ Acknowledgements
386
+ We would like to thank Arti Thakur, Communication Ph.D. Candidate at the
387
+ University of California, Davis, for her assistance with the analysis. All code is available at
388
+ https://osf.io/jx476/. The authors report there are no competing interests to declare.
389
+
390
+ Figure 1
391
+ We compare analytics computed on texts in their original languages to translated English
392
+ language analytics and texts translated back to the original language. Differences between
393
+ the original and translated texts are typically difficult to attribute to semantic differences
394
+ between the languages and “semantic” imposed by poor translation. Comparing original and
395
+ backtranslated texts enables us to control for the effect of drift and focus on semantics.
396
+
397
+ [Figure 1 diagram: original source-language text → intermediate English-language text → backtranslated source-language text]
+ Figure 2
409
+ Sentiment analysis overall retains accuracy after backtranslation by machine methods.
410
+ Median sentiment detection accuracy increases 4.9% from original language to English
411
+ translated language datasets, and falls less than 1% from original language datasets to
412
+ backtranslated language datasets. Note that 99% error bars are too narrow to be displayed.
413
+
414
+ [Figure 2 bars: English translated 0.701, original language 0.652, backtranslated 0.648]
+ Figure 3
417
+ Comparison of sentiment labeling accuracy across languages, before, during, and after
418
+ backtranslation. Seventeen language sentiment detection accuracy from original language >
419
+ English translated > backtranslated datasets. Note that 99% error bars are too narrow to be
420
+ displayed.
421
+ [Figure 3 panels: per-language sentiment accuracy bars for Albanian, Arabic, Bosnian, Bulgarian, Chinese, Croatian, English, German, Hindi, Hungarian, Polish, Portuguese, Russian, Slovak, Slovenian, Swedish, and Spanish; legend: English Translated w/o Neutrals, Original Language w/o Neutrals, Backtranslated w/o Neutrals]
+ Figure 4
+ Machine translation preserves topic clusters across languages, regardless of number of
562
+ topics. Percentage of backtranslated tweets assigned to the same cluster as original language
563
+ tweets by language. White text denotes permutation test accuracy. The 14 languages examined
564
+ demonstrate topic clustering accuracy preserved above chance.
565
+ [Figure 4 values: per-language assignment agreement for 2 and 100 clusters, with permutation-test chance accuracies shown in white]
+ Figure 5
623
+ Recovery of topics after backtranslation declines in an absolute sense with number of
624
+ topics, but increases relative to baseline. Average topic cluster accuracy by size of topic
625
+ cluster. Note that topic cluster accuracy decreases from two to ten clusters, after which it is
626
+ approximately stable to 200 clusters.
627
+
628
+ [Figure 5 legend: Arabic, Bulgarian, Chinese, Croatian, German, Hindi, Hungarian, Polish, Portuguese, Russian, Slovak, Slovenian, Spanish, Swedish, Average, Baseline]
+ Figure 6
644
+ Tweets are closer to their backtranslations on average than to other tweets.
645
+ Average distance between original language and backtranslated sentence embeddings by
646
+ language. Black lines denote the mean baseline distance and blue lines denote the minimum
647
+ baseline distance. All 16 languages have mean distances below their mean baseline. All
648
+ languages but Albanian, Arabic, Chinese, German, Hindi, and Portuguese have mean
649
+ distances below their minimum baseline. In these languages, backtranslated tweets are
650
+ further from their source tweet in meaning than tweets that are very semantically similar to
651
+ the source, but tweets in these languages are still consistently closer to their source than the
652
+ average tweet.
653
+
654
+ [Figure 6 bars: per-language mean embedding distance between original and backtranslated tweets, with mean and minimum baseline markers]
+ References
702
+ Ahmed, S., & Danti, A. (2015). Effective sentimental analysis and opinion mining of web
703
+ reviews using rule based classifiers. Advances in Intelligent Systems and Computing,
704
+ 171–179. https://doi.org/10.1007/978-81-322-2734-2_18
705
+ Al-Rfou, R., Kulkarni, V., Perozzi, B., & Skiena, S. (2015). Polyglot-NER: Massive
706
+ Multilingual Named Entity Recognition. Proceedings of the 2015 SIAM International
707
+ Conference on Data Mining, Vancouver, British Columbia, Canada, April 30- May 2,
708
+ 2015. https://doi.org/10.48550/arXiv.1307.1662
709
+ Alshaabi, T., Dewhurst, D. R., Minot, J. R., Arnold, M. V., Adams, J. L., Danforth, C. M., &
710
+ Dodds, P. S. (2021). The growing amplification of social media: Measuring temporal
711
+ and social contagion dynamics for over 150 languages on Twitter for 2009–2020. EPJ
712
+ Data Science, 10(1), Art. 1. https://doi.org/10/gjq4qq
713
+ Braithwaite, S. R., Giraud-Carrier, C., West, J., Barnes, M. D., & Hanson, C. L. (2016).
714
+ Validating machine learning algorithms for Twitter data against established measures
715
+ of Suicidality. JMIR Mental Health, 3(2). https://doi.org/10.2196/mental.4822
716
+ Caswell, I. (2022, May 11). Google Translate learns 24 new languages. Google. Retrieved
717
+ December 15, 2022, from https://blog.google/products/translate/24-new-languages/
718
+ Chen, Y., & Skiena, S. (2014). Building sentiment lexicons for all major languages.
719
+ Proceedings of the 52nd Annual Meeting of the Association for Computational
720
+ Linguistics (Volume 2: Short Papers), 383–389. https://doi.org/10.3115/v1/p14-2063
721
+
722
+ Featherstone, J. D., & Barnett, G. A. (2020). Validating sentiment analysis on opinion mining
723
+ using self-reported attitude scores. 2020 Seventh International Conference on Social
724
+ Networks Analysis, Management and Security (SNAMS).
725
+ https://doi.org/10.1109/snams52053.2020.9336540
726
+ Featherstone, J. D., Barnett, G. A., Ruiz, J. B., Zhuang, Y., & Millam, B. J. (2020). Exploring
727
+ childhood anti-vaccine and pro-vaccine communities on Twitter – a perspective from
728
+ influential users. Online Social Networks and Media, 20, 100105.
729
+ https://doi.org/10.1016/j.osnem.2020.100105
730
+ Frey, S., Donnay, K., Helbing, D., Sumner, R. W., Bos, M. W. (2018) The rippling dynamics
731
+ of valenced messages in naturalistic youth chat. Behavior Research Methods
732
+ http://doi.org/cwbz
733
+ Futrell, R., Mahowald, K., & Gibson, E. (2015). Large-scale evidence of dependency length
734
+ minimization in 37 languages. Proceedings of the National Academy of Sciences,
735
+ 112(33), 10336–10341. https://doi.org/10.1073/pnas.1502134112
736
+ Gautam, G., & Yadav, D. (2014). Sentiment analysis of Twitter data using machine learning
737
+ approaches and semantic analysis. 2014 Seventh International Conference on
738
+ Contemporary Computing (IC3). https://doi.org/10.1109/ic3.2014.6897213
739
+ Gharavi, E., Nazemi, N., & Dadgostari, F. (2020). Early Outbreak Detection for Proactive
740
+ Crisis Management Using Twitter Data: COVID-19 a Case Study in the US. ArXiv.
741
+ https://doi.org/arXiv:2005.00475
742
+
743
+ Imran, M., Qazi, U., & Ofli, F. (2022). TBCOV: Two Billion multilingual COVID-19 tweets
744
+ with sentiment, entity, GEO, and gender labels. Data, 7(1), 8.
745
+ https://doi.org/10.3390/data7010008
746
+ Lindsey, D. T., & Brown, A. M. (2009). World color survey color naming reveals universal
747
+ motifs and their within-language diversity. Proceedings of the National Academy of
748
+ Sciences, 106(47), 19785–19790. https://doi.org/10.1073/pnas.0910981106
749
+ Mocanu, D., Baronchelli, A., Perra, N., Gonçalves, B., Zhang, Q., & Vespignani, A. (2013).
750
+ The Twitter of Babel: Mapping World Languages through Microblogging Platforms.
751
+ PLOS ONE, 8(4), e61981. https://doi.org/10/f4vdv4
752
+ Mozetič, I., Grčar, M., & Smailović, J. (2016). Multilingual twitter sentiment classification:
753
+ The role of human annotators. PLOS ONE, 11(5).
754
+ https://doi.org/10.1371/journal.pone.0155036
755
+ Nausheen, F., & Begum, S. H. (2018). Sentiment analysis to predict election results using
756
+ Python. 2018 2nd International Conference on Inventive Systems and Control (ICISC).
757
+ https://doi.org/10.1109/icisc.2018.8399007
758
+ Yin, J., & Wang, J. (2014). A Dirichlet multinomial mixture model-based approach for short
759
+ text clustering. Proceedings of the 20th ACM SIGKDD International Conference on
760
+ Knowledge Discovery and Data Mining, 233–242.
761
+ https://doi.org/10.1145/2623330.2623715
762
+
2dFAT4oBgHgl3EQfDhxB/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
2tAzT4oBgHgl3EQffPw3/content/tmp_files/2301.01448v1.pdf.txt ADDED
@@ -0,0 +1,2181 @@
 
1
+ 1
2
+ A deep local attention network for pre-operative
3
+ lymph node metastasis prediction in pancreatic
4
+ cancer via multiphase CT imaging
5
+ Zhilin Zheng, Xu Fang, Jiawen Yao, Mengmeng Zhu, Le Lu Fellow, IEEE, Lingyun Huang, Jing Xiao, Yu Shi,
6
+ Hong Lu, Jianping Lu, Ling Zhang, Chengwei Shao*, Yun Bian*
7
+ Abstract—Lymph node (LN) metastasis status is one of the
8
+ most critical prognostic and cancer staging factors for patients
9
+ with resectable pancreatic ductal adenocarcinoma (PDAC), or in
10
+ general, for any types of solid malignant tumors. Preoperative
11
+ prediction of LN metastasis from non-invasive CT imaging
12
+ is highly desired, as it might be straightforwardly used to
13
+ guide the following neoadjuvant treatment decision and surgical
14
+ planning. Most studies only capture the tumor characteristics
15
+ in CT imaging to implicitly infer LN metastasis and very few
16
+ work exploit direct LN’s CT imaging information. LN staging
17
+ is usually confirmed from pathological images acquired after
18
+ invasive procedures of biopsy or surgery. To the best of our
19
+ knowledge, this is the first work to propose a fully-automated
20
+ LN segmentation and identification network to directly facilitate
21
+ the LN metastasis status prediction task. Nevertheless LN seg-
22
+ mentation/detection is very challenging since LN can be easily
23
+ confused with other hard negative anatomic structures (e.g.,
24
+ vessels) from radiological images. 1) We explore the anatomical
25
+ spatial context priors of pancreatic LN locations by generating a
26
+ guiding attention map from related organs and vessels to assist
27
+ segmentation and infer LN status. As such, LN segmentation
28
+ is impelled to focus on regions that are anatomically adjacent
29
+ or plausible with respect to the specific organs and vessels
30
+ (thus hard negative samples with certain distance ranges can
31
+ be ignored). 2) The metastasized LN identification network is
32
+ trained to classify the segmented LN instances into positives
33
+ or negatives by reusing the segmentation network as a pre-
34
+ trained backbone and padding a new classification head. 3)
35
+ More importantly, we develop a LN metastasis status prediction
36
+ network that combines the patient-wise aggregation results of LN
37
+ segmentation/identification and deep imaging features extracted
38
+ from the tumor region. 4) Extensive quantitative nested five-
39
+ fold cross-validation is conducted on a discovery dataset of 749
40
+ patients with PDAC. External multi-center clinical evaluation is
41
+ further performed on two other hospitals of 191 patients in total.
42
+ Our final multi-staged LN status prediction network statistically
43
+ significantly outperforms the strong baseline of nnUNet and
44
+ several other compared methods, including CT-reported LN
45
+ status, radiomics, and deep learning models.
46
+ Index Terms—Pancreatic ductal adenocarcinoma (PDAC),
47
+ Z. Zheng, L. Huang, J. Xiao are with Ping An Technology (Shanghai &
48
+ Shenzhen), People’s Republic of China, (e-mail: [email protected]).
49
+ X. Fang, M. Zhu and J. Lu are with Changhai Hospital, Shanghai, People’s
50
+ Republic of China. J. Yao, L. Lu and L. Zhang were with PAII Inc., Bethesda,
51
+ MD 20817, USA. X. Fang and J. Yao contributed equally.
52
+ Y. Shi is with Department of Radiology, Shengjing Hospital of China
53
+ Medical University, Shenyang, China.
54
+ H. Lu is with Department of Radiology, Tianjin Medical University Cancer
55
+ Institute and Hospital, National Clinical Research Center of Cancer, Key
56
+ Laboratory of Cancer Prevention and Therapy, Tianjin, China.
57
+ *s indicate joint corresponding authors. Y. Bian and C. Shao are
58
+ with Changhai Hospital, Shanghai, People’s Republic of China, (email:
59
60
+ Lymph node metastasis, Lymph node segmentation, Contrast-
61
+ enhanced computed tomography
62
+ I. INTRODUCTION
63
+ P
64
+ ANCREATIC cancer is the third leading cause of overall
65
+ cancer death in the United States [1], of which approx-
66
+ imately 95% is pancreatic ductal adenocarcinoma (PDAC)
67
+ [2]. With the poorest prognosis (i.e., 5-year overall survival
68
+ (OS) of 10% approximately), surgical resection is the most
69
+ effective way to achieve long-term survival for patients with
70
+ PDAC [2]. However, not all patients can benefit from the
71
+ margin-negative (R0) resection and comprehensive treatment
72
+ protocol is usually established for pancreatic cancer. The
73
+ patients’ treatment selections can be determined by whether
74
+ their peripancreatic lymph nodes (LNs) have metastasized with
75
+ the options of adjuvant radiotherapy (RT) or chemotherapy.
76
+ It was found that neoadjuvant therapy before surgery was
77
+ associated with improved survival and time to recurrence in
78
+ patients with LN metastasis, since neoadjuvant therapy can
79
+ not only treat lymphovascular invasion but also benefit tumor
80
+ downstaging [3], [4]. The accurate preoperative detection of
81
+ LN metastasis becomes vital and would aid in treatment
82
+ planning and management.
83
+ Contrast-enhanced CT is used as the typical imaging pro-
84
+ tocol for identifying the presence of peripancreatic metastatic
85
+ disease to LNs, but it is a very challenging task for radiologists
86
+ to determine whether a patient has LN metastasis by using only
87
+ CT scans. To this end, poor diagnostic accuracy of CT with a
88
+ pooled sensitivity of 25% and positive predictive value (PPV)
89
+ of 28% was reported in a meta-analysis [5] on assessing extra-
90
+ regional LN metastasis in pancreatic and peri-ampullary can-
91
+ cer. Recently, several radiomics based approaches have been
92
+ proposed to tackle the LN metastasis differentiation problem
93
+ of various cancer types
94
+ [6]–[12]. However, these methods
95
+ require hand-crafted feature design which can bring concerns
96
+ of reproducibility and human bias is often introduced due to
97
+ manual selection of 2D tumor slice with limited representation
98
+ power. Although there are some deep learning work that report
99
+ promising performance on predicting LN metastasis status in
100
+ gastric cancer [13], [14], those models assume that the risk of
101
+ metastases is fundamentally driven by the primary tumor. They
102
+ rely on LN CT report information for the integration model
103
+ without using any LNs detection or segmentation. The model
104
+ that takes both tumor morphology and lymphatic anatomy into
105
+ arXiv:2301.01448v1 [eess.IV] 4 Jan 2023
106
+
107
+ 2
108
+ [Figure 1 panel labels: arterial and venous phase CT for a patient with and a patient without metastasis; tumor, positive/negative LN, and tumor-LN anatomy annotations; adjacent organs and vessels (spleen, esophagus, stomach, aorta, pancreas, duodenum, SMA, TC, LGA, CHA/PHA)]
153
+ Tumor
154
+ Fig. 1. A visualization of pancreatic tumor (in dark red) and LNs (in pink red for positives or green for negatives) in multi-phase CT images
155
+ and their spatial distributions corresponding to key anatomical structures as follows. SMA: superior mesenteric artery. TC&SA: truncus
156
+ coeliacus and splenic artery. LGA: left gastric artery; CHA&PHA: common hepatic artery and proper hepatic artery.
157
+ account could be of more clinical usefulness in addressing
158
+ these aforementioned issues, similarly as in the diagnostic
159
+ processes performed by radiologist readers. PET/CT is another
160
+ imaging modality worth exploring. PET/CT based approaches
161
+ [15]–[17] generally use maximum standardized uptake value
162
+ (SUVmax) of manually-drawn LN RoIs as the prediction
163
+ element, but it comes with challenges of numerous false
164
+ positives from inflammatory LNs and false negatives from
165
+ small-sized metastatic LNs [18], [19]. Also, it is relatively
166
+ not as common as CT, which is less affordable, available and
167
+ accessible, hence we opt for CT for our research purpose.
168
+ In this paper, we tackle the LN metastasis status prediction
169
+ problem in patients with PDAC by first segmenting and
170
+ identifying instances of LNs and then classifying the patients
171
+ into metastasis-positive or -negative group. LNs are tiny struc-
172
+ tures that anatomically locate surrounding organs and vessels.
173
+ Their locations have been mapped into 18 stations that are
174
+ relevant for pancreatic cancer tumor according to their relative
175
+ positions against adjacent anatomical structures, as defined
176
+ by Japan Pancreas Society (JPS) [20] (see Supplementary
177
+ Table 1 in the supplementary material for details). Examples
178
+ of their spatial distribution are shown in Fig. 1. Metastasis
179
+ happens when cancer cells spread from the primary tumor
180
+ to LNs, causing enlargement of LNs and other underlying
181
+ changes. Response Evaluation Criteria in Solid Tumors (RE-
182
+ CIST) criteria [21] defines the criteria for LN metastasis
183
+ suspicion, i.e., nodes with short axis greater than 10mm,
184
+ heterogeneity and central necrosis. However, these criteria are
185
+ not pathognomonic since there exist false negatives associated
186
+ with small node micrometastases and false positives with
187
+ inflammatory nodes larger than 10mm in short axis. Hence,
188
+ finding LNs in CT images is quite time-consuming and can be
189
+ inconsistent depending on radiologists’ subjective experiences.
190
+ It is ambiguous for radiologists to identify nodal positivity
191
+ accurately from CT without referring to pathology reports.
192
+ The gold standard for determination of metastasis is based on
193
+ post-operative pathological evaluation of pancreatectomy spec-
194
+ imens. Automated yet reliable pre-operative LN segmentation
195
+ and identification are highly desirable for patient surgical or
196
+ RT treatment planing.
197
+ LN segmentation is inherently challenging due to two
198
+ reasons: 1) small to tiny sizes of LN cause extreme foreground-
199
+ background class imbalance problem; 2) LNs have CT atten-
200
+ uation values similar to vessels and other soft-tissue struc-
201
+ tures, resulting in visual confusions. Existing work [22]–[24]
202
+ mainly adopt U-Net based deep networks [25]–[27] as strong
203
+ backbones, in which skip connections aggregate multi-level
204
+ semantic representation and alleviate vanishing gradient prob-
205
+ lem. They incorporate anatomical context by directly taking
206
+ organs&vessels segmentation masks as either supervision tar-
207
+ gets [22] or additional inputs [24], [28]. Concerns remain
208
+ that the relationship between lymphatic anatomy and adjacent
209
+ anatomical structures is not well explored. We address it by
210
+ introducing a distance-guided attention map to fully utilize the
211
+ spatial priors. In our segmentation framework, the LN attention
212
+ map is obtained via a pre-defined mapping from distance
213
+ maps of related organs/vessels that have been integrated into
214
+ UNet-based backbone to control the segmentation network’s
215
+
216
225
+ spatial focus. It simultaneously assists in improving sample
226
+ selection strategy that filters out non-informative negative
227
+ samples (called ”informative negative selection”) to tackle the
228
+ class imbalance problem. The segmented LNs are labeled as
229
+ positive/negative using radiologist’s judgement as the standard
230
+ that combines information from pathological results and CT
231
+ intensities. A classification network is subsequently derived by
232
+ sharing the same backbone with segmentation and initialized
233
+ with the trained segmentation parameters. This strategy ben-
234
+ efits the classification task from densely structured prediction
235
+ in segmentation. By predicting LN metastasis in patients
236
+ with PDAC, we employ a modified ResNet [29] classification
237
+ model. Tumor characteristics are proven to be important cues
238
+ for metastasis [8], [9], [11], so we integrate both tumor and
239
+ LN cues by taking as inputs the image patches of tumor
240
+ and the patient-wise aggregation of LN segmentation and
241
+ identification.
242
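+ A minimal sketch (not the authors' implementation) of how a distance-guided attention map
+ could be derived from organ and vessel masks; the exponential decay and the scale parameter
+ tau are assumptions standing in for the pre-defined mapping described above.
+ import numpy as np
+ from scipy.ndimage import distance_transform_edt
+ def ln_attention_map(anatomy_masks, tau=20.0):
+     # anatomy_masks: binary 3D arrays for the referenced organs and vessels
+     distances = [distance_transform_edt(mask == 0) for mask in anatomy_masks]   # voxel distance to each structure
+     nearest = np.minimum.reduce(distances)               # distance to the closest structure
+     return np.exp(-nearest / tau)                        # attention decays with distance from the anatomy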
+ Our main contribution is fourfold: 1) To the best of
243
+ our knowledge, this work is the first to directly incorporate
244
+ automated LN segmentation and identification for assisting
245
+ preoperative LN metastasis status prediction for patients with
246
+ PDAC. 2) We propose an attention-based LN segmentation
247
+ network with the guidance of distances to nearby anatomical
248
+ structures, explicitly exploiting the spatial priors and simul-
249
+ taneously addressing the foreground-background imbalance
250
+ issue. 3) We design a compositive LN metastasis status
251
+ prediction network combining both tumor and positive LN
252
+ characteristics, showing the potential of tumor-LN guided
253
+ cues. 4) Extensive quantitative experiments are conducted to
254
+ evidently validate the effectiveness of our deep local attention
255
+ network in both tasks of LN segmentation and LN metastasis
256
+ status prediction, and external multi-center clinical evaluation
257
+ is performed to demonstrate the generalization ability. Without
258
+ loss of generality, our proposed method is applicable for
259
+ finding the preoperative LN metastasis status of other types
260
+ of solid tumor or cancers, such as liver or gastric cancers.
261
+ II. RELATED WORK
262
+ A. Lymph Node Segmentation
263
+ Automated LN segmentation in CT images is an essential
264
+ yet challenging task in medical image analysis. Traditional
265
+ approaches tackle this problem by the means of atlas based
266
+ search space restriction [30], spatial prior features combination
267
+ [31], [32], supervoxel clustering [33], etc. In recent years, U-
268
+ Net based deep networks have shown remarkable performance
269
+ in numerous organ or tumor segmentation tasks [34]–[38].
270
+ nnUNet [27] further proposes a self-configuring approach,
271
+ with automatic configurations including preprocessing, net-
272
+ work architecture, training and post-processing, that achieves
273
+ robust performance and general applicability. To address the
274
+ strong class imbalance issues in LN segmentation, four other
275
+ anatomical structures are included as training targets [22]
276
+ using 3D U-Net [26] framework. [23] utilizes parallel net-
277
+ works of 2D U-Net [25] and Mask R-CNN [39] with the
278
+ supervision of all considered anatomical structures and LNs,
279
+ benefiting from both semantic and instance segmentation.
280
+ Another strategy to incorporate anatomical context is to take
281
+ organ segmentation masks as additional channels of the input.
282
+ [28] proposes an ensemble approach for a slab-wise and
283
+ a downsampled full volume based LN segmentation, taking
284
+ the concatenation of CT image and segmented anatomical
285
+ structure mask as input. DeepStationing [24] presents a key
286
+ referencing organ auto-search strategy and combines selected
287
+ organs into the network via input concatentation for LN station
288
+ parsing. All above methods implicitly exploit spatial priors
289
+ of LNs by injecting the anatomical structure masks either as
290
+ inputs or supervisions, hence the prior knowledge has not been
291
+ fully exploited. More importantly, there is a lack of studies on
292
+ how LN segmentation could be used for predicting metastasis.
293
+ B. Lymph Node Metastasis Prediction
294
+ Radiomics Methods. Radiomics is a powerful technique
295
+ for extracting quantitative image features with the purpose
296
+ of clinical decision support, and thus widely used in can-
297
+ cer research [11], [12], [40]–[42]. It converts imaging data
298
+ into different types of hand-crafted features, including shape,
299
+ intensity, texture and filter-based (e.g., wavelet, Laplacian of
300
+ Gaussian) features. Applications of radiomics on predicting
301
+ LN metastasis from primary tumor have been explored in
302
+ many recent works [6]–[10]. Radiomics features are first
303
+ extracted from manually delineated tumor regions in any
304
+ contrast-enhanced CT images. Feature selection and classifi-
305
+ cation model construction (e.g., logistic regression, random
306
+ forest) are then performed to give LN metastasis prediction
307
+ for various cancer types like gastric cancer [7], [12], biliary
308
+ tract cancer [6] and PDAC [8], [9], [11]. Relying only on
309
+ primary tumor radiomics without considering LNs character-
310
+ istics may limit the prediction performance, thus [10] uses
311
+ manual annotations of the largest LN visible in the gastric
312
+ region and combines LN radiomics into the prediction model
313
+ for gastric cancer. However, problem still remains because it
314
+ simply involves the largest LN without identifying the nodal
315
+ positivity.
316
+ Deep Learning based Methods. Recent advances in deep
317
+ learning have made it a mainstream method of addressing the
318
+ entire workflow of diagnosis and treatment for various cancer
319
+ types on medical imaging, such as oropharyngeal cancer [43],
320
+ lung cancer [44], as well as pancreatic cancer [45]–[47]. Deep
321
+ neural networks are applied to LN metastasis in many studies
322
+ [13], [14], [48], [49]. In [48], deep features are extracted
323
+ from tumor ROIs in bimodal image (i.e., US and SWE) using
324
+ ResNet [29], and then fed into a SVM model for predicting
325
+ axillary LN status in breast cancer. For gastric cancer, [14]
326
+ combines DenseNet [50] features with some hand-crafted
327
+ features, extracted from the 2D tumor ROI with the largest
328
+ area in multi-phase CT images. To investigate metastasis in
329
+ individual LN stations for gastric cancer, [13] develops a
330
+ system of multiple independent ResNets with tumor ROIs and
331
+ corresponding annotation masks as inputs where each ResNet
332
+ is responsible to predict metastasis at one specific nodal sta-
333
+ tion. Most existing studies capture only tumor characteristics
334
+ for LN metastasis prediction, while the one leveraging LN
335
+ radiomics requires manual delineation and considers simply
336
+ the LN with the largest size [10]. An automated and accurate
337
+ Fig. 2. The proposed framework for (a) two-stage LN segmentation and identification and (b) LN metastasis status prediction.
+ process of LN segmentation and nodal positivity identification
396
+ is hence of high importance for assisting metastasis prediction.
397
+ III. METHODOLOGY
398
+ The overall framework is illustrated in Fig. 2, which is com-
399
+ posed of (a) distance-guided attention-based LN segmentation
400
+ and identification network, and (b) tumor and LN combined
401
+ metastasis status prediction network.
402
+ A. Distance-guided Attention-based Lymph Node Segmenta-
403
+ tion and Identification Network
404
+ We perform LN detection from any input CT scan by a two-
405
+ stage strategy: segmenting the image into two classes of LN
406
+ and background voxels, followed by identifying segmented LN
407
+ instances as positive or negative.
408
+ 1) Stage 1: Class-agnostic Lymph Node Segmentation:
409
+ Based on the spatial prior that LN stations are geometrically
410
+ distributed or constrained around certain anatomical structures,
411
+ we propose an attention based LN segmentation network by
412
+ taking the distances to nearby organs/vessels into account.
413
+ Our LN segmentation network differs from the strong baseline
414
+ (i.e., nnUNet [27]) in that attention mechanism is applied to
415
+ guide possible LN locations, with the advantage of reducing
416
+ false positive predictions outside those locations. The intuition
417
+ behind the attention module is that the attention map can cover
418
+ regions adjacently constrained to those organs and vessels.
419
+ Attention Map Generation. To explicitly capture and
420
+ model the lymphatic anatomy, attention computation is im-
421
+ plemented as a pre-defined geometric mapping function from
422
+ organ&vessel distance maps. An example of attention map
423
+ generation process is shown in Fig. 3. Specifically, given a
424
+ multi-phase input CT volume X ∈ RN×W ×H×D, we first
425
+ obtain organ&vessel segmentation mask using nnUNet [27]
426
+ model trained with 19 classes of annotations. Ten classes
427
+ among them involved with 17 LN stations are used (see
428
+ Supplementary Table 1 in the supplementary material for the
429
+ definition of LN stations), i.e., spleen, esophagus, stomach,
430
+ aorta, pancreas, duodenum, superior mesenteric artery (SMA),
431
+ truncus coeliacus and splenic artery (TC&SA), left gastric
432
+ artery (LGA), common hepatic artery and proper hepatic
433
+ artery (CHA&PHA). Note that station 15# (LNs along middle
434
+ Fig. 3. An illustration of the attention map generation process.
+ colic artery) is left aside here since it is related to distant
480
+ metastasis that rarely happens in our patient population. A
481
+ SDT is applied to each class of the segmentation mask M ∈
482
+ {0, 1, 2, ..., 10}W ×H×D, generating a total of 10 organ/vessel
483
+ distance maps Di where i ∈ {1, 2, ..., 10} is the index of
484
+ organ/vessel class. Di has positive values at the voxels outside
485
+ the i-th organ/vessel and negative scores inside it. Intuitively,
486
+ LNs are likely to appear within a certain range of distance
487
+ to each organ/vessel, which requires paying attention to. To
488
+ obtain the distance-guided attention maps, Di is passed to an
489
+ isosceles trapezium-shaped non-linear mapping function (see
490
+ Fig. 3), formulated as
497
+ f^i(d) =
+   \begin{cases}
+   1, & d^i_{min} \le d \le d^i_{max} \\
+   -(d - d^i_{max} - 3)/3, & d^i_{max} < d < d^i_{max} + 3 \\
+   (d - d^i_{min} + 3)/3, & d^i_{min} - 3 < d < d^i_{min} \\
+   0, & \text{otherwise}
+   \end{cases}
+ (1)
+ where d is the individual element in D^i; d^i_{min} and d^i_{max} determine
+ the distance range in mm; the smooth border of 3 mm
541
+ is chosen empirically. This mapping function converts the
542
+ distance maps to the attention scores ranging from 0 to 1, with
543
+ 1 indicating possible locations of LNs, 0 indicating impossible
544
+ locations, and decimals lying in between. The i-th attention
545
+ map Ai is obtained by Ai = f i(Di).
546
+ The final attention map Aall is produced by integrating all
547
+ of the organ/vessel-specific attention maps, thus Aall can cover
548
+ the whole areas that need attending to. In specific, Aall takes
549
+ the element-wise maximum of all Ai except for the voxels
550
+ inside an organ/vessel, illustrated as
551
+ a^{all}_v =
+   \begin{cases}
+   \max_{i=1,2,...,10} a^i_v, & m_v = 0 \\
+   a^i_v, & m_v = i
+   \end{cases}
+ (2)
+ where a^*_v and m_v are the values of A^* and M at the voxel v.
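+ To make this concrete, the mapping of Eqs. (1)-(2) could be sketched in NumPy as below. This is our illustration rather than the released implementation; the helper names (signed_distance, trapezoid, attention_map) are ours, and it assumes a single label volume with classes 1-10 and the (d_min, d_max) ranges of Table II.
+ import numpy as np
+ from scipy.ndimage import distance_transform_edt
+
+ def signed_distance(binary_mask, spacing):
+     # signed distance in mm: positive outside the structure, negative inside
+     outside = distance_transform_edt(~binary_mask, sampling=spacing)
+     inside = distance_transform_edt(binary_mask, sampling=spacing)
+     return outside - inside
+
+ def trapezoid(d, d_min, d_max, border=3.0):
+     # Eq. (1): 1 inside [d_min, d_max], linear 3 mm ramps on both sides, 0 elsewhere
+     a = np.zeros_like(d, dtype=np.float32)
+     a[(d >= d_min) & (d <= d_max)] = 1.0
+     up = (d > d_min - border) & (d < d_min)
+     a[up] = (d[up] - d_min + border) / border
+     down = (d > d_max) & (d < d_max + border)
+     a[down] = -(d[down] - d_max - border) / border
+     return a
+
+ def attention_map(organ_labels, ranges, spacing=(0.68, 0.68, 0.80)):
+     # organ_labels: int volume, 0 = background, 1..10 = organ/vessel classes
+     # ranges: {class_id: (d_min, d_max)} as listed in Table II
+     a_all = np.zeros(organ_labels.shape, dtype=np.float32)
+     for cls, (d_min, d_max) in ranges.items():
+         a_i = trapezoid(signed_distance(organ_labels == cls, spacing), d_min, d_max)
+         a_all = np.where(organ_labels == 0, np.maximum(a_all, a_i), a_all)  # Eq. (2), m_v = 0
+         a_all = np.where(organ_labels == cls, a_i, a_all)                   # Eq. (2), m_v = i
+     return a_all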
566
+ Attention based Lymph Node Segmentation. After ob-
567
+ taining the distance-guided attention map, we incorporate it
568
+ to the segmentation network with 3D nnUNet [27] as the
569
+ backbone. The attention mechanism emphasizes LN-related
570
+ regions by spatially scaling the features with the attention map.
571
+ Given the input image X and the one-hot segmentation label
572
+ Y , the deep features at the penultimate layer are extracted
573
+ and multiplied element-wisely with Aall. It is finally passed
574
+ through a convolution block with a softmax layer to produce
575
+ the segmentation output P ∈ R2×W ×H×D.
576
+ Due to GPU memory limitation, patch-based training strat-
577
+ egy is employed. nnUNet randomly samples 3D image patches
578
+ from the whole CT scan and enforces that more than a third
579
+ of samples in a batch contain at least one foreground class to
580
+ control the foreground-to-background ratio. Considering the
581
+ extreme class imbalance problem caused by the small LN
582
+ targets, we improve it with the “informative negative selection”
583
+ scheme. Note that our proposed attention mechanism helps
584
+ block out features at the voxels with a certain distance to
585
+ or inside all organs and vessels, resulting in lots of non-
586
+ informative negative patches filled with 0 by applying zero
587
+ attention scores. Thus we can naturally throw out those non-
588
+ informative patches, and select patches containing at least one
589
+ attention score > 0 (called informative patches) for training.
590
+ This sampling strategy further boosts the network’s concen-
591
+ tration on targeted regions of interest (ROIs) surrounding
592
+ organs/vessels.
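+ One possible reading of this selection scheme in code, with hypothetical helper names and a single image volume for brevity, is:
+ import numpy as np
+
+ def sample_informative_patches(image, attention, patch_size, n_patches, rng=None):
+     # keep only candidate patches whose attention map is not entirely zero,
+     # so every training patch lies near an anatomically plausible LN region
+     rng = rng or np.random.default_rng()
+     patches = []
+     while len(patches) < n_patches:
+         corner = [rng.integers(0, s - p + 1) for s, p in zip(image.shape, patch_size)]
+         sl = tuple(slice(c, c + p) for c, p in zip(corner, patch_size))
+         if attention[sl].max() > 0:          # informative patch
+             patches.append((image[sl].copy(), attention[sl].copy()))
+     return patches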
593
+ For the training objective, to better balance precision and recall,
+ we modify the Dice loss in nnUNet with the Tversky loss [51]:
595
+ L_T = -\frac{2}{|V|}\,\frac{\sum_v p_{1,v} y_{1,v}}{2\sum_v p_{1,v} y_{1,v} + \alpha\sum_v p_{1,v} y_{0,v} + \beta\sum_v p_{0,v} y_{1,v}}
+ (3)
604
+ where |V| is the number of voxels, p_{1,v} is the probability of voxel v being
+ a LN, and p_{0,v} is the probability of it being a non-LN. Also, y_{1,v} is 1 for
+ a LN voxel and 0 for a non-LN voxel, and vice versa for y_{0,v}. In practice, we
+ set α = 0.5 and β = 1.5 to emphasize false negatives and boost recall.
609
+ The whole network is trained by the combination of cross
610
+ entropy loss LCE and Tversky loss LT with equal weights as
611
+ in nnUNet.
612
+ L_{CE} = -\frac{1}{|V|}\sum_v \sum_{k=0,1} y_{k,v}\,\log(p_{k,v})
+ (4)
+ L_{SEG} = L_{CE} + L_T
+ (5)
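+ A PyTorch sketch of the objective in Eqs. (3)-(5) is given below; the exact normalisation and smoothing constants of the original implementation are assumptions on our part.
+ import torch
+
+ def tversky_loss(probs, target_onehot, alpha=0.5, beta=1.5, eps=1e-6):
+     # probs, target_onehot: (B, 2, W, H, D); channel 1 is the LN foreground
+     p1, p0 = probs[:, 1], probs[:, 0]
+     y1, y0 = target_onehot[:, 1], target_onehot[:, 0]
+     tp = (p1 * y1).sum()
+     fp = (p1 * y0).sum()          # weighted by alpha
+     fn = (p0 * y1).sum()          # weighted by beta; beta > alpha boosts recall
+     return -(2 * tp) / (2 * tp + alpha * fp + beta * fn + eps)
+
+ def segmentation_loss(logits, target_onehot):
+     probs = torch.softmax(logits, dim=1)
+     # cross entropy averaged over voxels and classes, Eq. (4)
+     ce = -(target_onehot * torch.log(probs.clamp_min(1e-6))).mean()
+     return ce + tversky_loss(probs, target_onehot)   # Eq. (5), equal weights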
622
+ Following nnUNet, the network is trained with deep supervi-
623
+ sion, i.e., losses are calculated over multi-resolution outputs
624
+ given by final and intermediate layers of the decoder, and
625
+ the corresponding downsampled ground-truth (GT) masks are
626
+ used as targets. Here attention mechanism is applied in a multi-
627
+ scale manner. That is, the attention map, after downsampled
628
+ to match the resolution, is injected to the intermediate decoder
629
+ feature for each deep supervision output.
630
+ 2) Stage 2: Instance-wise Lymph Node Identification: After
631
+ segmenting LN instances from the whole CT image, we then
632
+ classify them into either positive or negative class. To benefit
633
+ from the already trained dense segmentation network of stage
634
+ 1, the task of LN instance identification reuses 3D nnUNet
635
+ backbone and is initialized using the trained segmentation
636
+ parameters, with a new classification head added upon it.
637
+ Cross entropy loss is adopted to finetune the whole network
638
+ for classifying the instance as positive/negative. To generate
639
+ LN instances, we crop patches centered at the connected
640
+ components of the segmentation mask. GT LN instances are
641
+ cropped and employed in the training phase. While at inference
642
+ time, we can apply the classification network to identify
643
+ each segmented LN of stage 1, and obtain a class-aware LN
644
+ segmentation mask.
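+ The instance-cropping step for Stage 2 can be sketched as follows; the helper names and the simple boundary handling are our own illustration, not the released code.
+ import numpy as np
+ from scipy.ndimage import label, center_of_mass
+
+ def crop_ln_instances(image, ln_mask, patch_size=(96, 96, 80)):
+     # connected components of the binary LN mask become instances; a fixed-size
+     # patch centred on each instance is cropped for the positive/negative classifier
+     instances, n = label(ln_mask > 0)
+     patches = []
+     for idx in range(1, n + 1):
+         centre = np.round(center_of_mass(instances == idx)).astype(int)
+         sl = []
+         for c, p, s in zip(centre, patch_size, image.shape):
+             start = int(np.clip(c - p // 2, 0, max(s - p, 0)))
+             sl.append(slice(start, start + p))
+         patches.append(image[tuple(sl)])
+     return patches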
645
+ B. Tumor and Lymph Node Combined LN Metastasis Status
646
+ Prediction Network
647
+ Besides LNs themselves, imaging characteristics in the
648
+ primary tumor play an important role in predicting the status
649
+ of LN metastasis. To further boost the performance, we build
650
+ a combined classification network, integrating both PDAC and
651
+ LNs related information. In contrast to previous work that
652
+ only consider tumor characteristics [8], [9], [11], our method
653
+ benefits from directly observing the status of LN instances by
654
+ automated LN segmentation and identification.
655
+ Given a CT image and the corresponding PDAC mask,
656
+ 2D slices with the top three largest PDAC areas in each of
657
+ axial, sagittal, and coronal planes are cropped, resulting in
658
+ nine image patches in total. Each image patch is fed into
659
+ a ResNet [29] pre-trained on ImageNet [52] for metastasis
660
+ prediction. Inspired by [53], a side branch with the PDAC
661
+ mask as input is added to encourage the network to concentrate
662
+ on the PDAC region. Specifically, the side branch consists of
663
+ a Conv-ReLU block and maps the input mask to a feature
664
+ map with the same shape as the output of “Conv1” layer in
665
+ ResNet. It is then integrated into the backbone by element-wise
666
+
667
+ 6
668
+ multiplication with the “Conv1” feature. Our initial experiment
669
+ empirically shows that such incorporation produces better per-
670
+ formance than direct input-level fusion, as the convolution in
671
+ the side branch learns which region to focus on in each channel
672
+ of “Conv1” feature (e.g., regions inside the mask, around the
673
+ mask border or outside the mask). To be better aligned with
674
+ the pre-trained backbone and eliminate the initial effect of the
675
+ side branch, the weights and biases in the convolution layer
676
+ are initialized to 0 and 1 respectively. Before classification, we
677
+ additionally employ a Texture Encoding Layer (TEL) [54] on
678
+ top of the “Layer4” feature F_{L4} to extract a texture
679
+ representation. TEL serves as an orderless feature pooling
680
+ layer that encodes spatially invariant representation describing
681
+ the feature distributions, which benefits texture recognition of
682
+ the PDAC region. The original deep feature is merged with
683
+ the texture feature to form an enhanced representation F:
684
+ F = Concat(GAP(F_{L4}), TEL(F_{L4}))
+ (6)
+ where Concat and GAP denote feature concatenation and
+ global average pooling, respectively.
688
+ We further integrate LN-related cues into the network given
689
+ the LN segmentation and identification results described in
690
+ Section III-A. A patient is considered as metastasis-positive if
691
+ there exists at least one positive LN, thus it is very sensitive
692
+ to the false positives in LN identification. Therefore, we
693
+ employ the volume of positive LN as the feature instead
694
+ of its binary status of presence/absence, based on the fact
695
+ that positive LNs tend to have larger volume than negative
696
+ ones. The volume of the largest positive LN in each patient
697
+ Vmax
698
+ pLN =
699
+ max
700
+ ln∈{positive LNs} Vln (in mm3) is mapped to a vector-
701
+ shaped feature, and fused with F by element-wise addition,
702
+ formulated as follows:
703
+ Fcomb = FC(BN(Vmax
704
+ pLN)) + F
705
+ (7)
706
+ where FC and BN denote the full-connected layer and batch
707
+ normalization layer, respectively. Other LN features, such as
708
+ the average or total volume of positive LNs, are also evaluated,
709
+ with the current setting giving the best result. Finally, the
710
+ classification probabilities generated from nine image patches
711
+ are averaged to give an ensemble prediction for a patient.
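+ For illustration, the combined predictor could be assembled as in the PyTorch sketch below. The texture branch is only a stand-in for the TEL of [54], and the layer sizes and names are assumptions rather than the exact released architecture.
+ import torch
+ import torch.nn as nn
+ from torchvision.models import resnet18
+
+ class CombinedPredictor(nn.Module):
+     def __init__(self, texture_dim=64):
+         super().__init__()
+         self.backbone = resnet18(weights="IMAGENET1K_V1")
+         # side branch: PDAC mask -> gating map for the "Conv1" feature
+         self.side = nn.Sequential(nn.Conv2d(1, 64, 7, stride=2, padding=3), nn.ReLU())
+         nn.init.zeros_(self.side[0].weight)   # weights 0, bias 1: identity-like gate at start
+         nn.init.ones_(self.side[0].bias)
+         # stand-in for the Texture Encoding Layer (TEL)
+         self.texture = nn.Sequential(nn.Conv2d(512, texture_dim, 1), nn.AdaptiveAvgPool2d(1))
+         self.vol_fc = nn.Sequential(nn.BatchNorm1d(1), nn.Linear(1, 512 + texture_dim))
+         self.cls = nn.Linear(512 + texture_dim, 2)
+
+     def forward(self, patch, pdac_mask, pos_ln_volume):
+         x = self.backbone.conv1(patch)
+         x = x * self.side(pdac_mask)                       # mask-guided gating of Conv1
+         x = self.backbone.maxpool(self.backbone.relu(self.backbone.bn1(x)))
+         for layer in (self.backbone.layer1, self.backbone.layer2,
+                       self.backbone.layer3, self.backbone.layer4):
+             x = layer(x)
+         gap = torch.flatten(self.backbone.avgpool(x), 1)
+         tex = torch.flatten(self.texture(x), 1)
+         f = torch.cat([gap, tex], dim=1)                   # Eq. (6)
+         f = f + self.vol_fc(pos_ln_volume)                 # Eq. (7)
+         return self.cls(f)
+ At inference, the nine per-patch outputs for a patient would be soft-maxed and averaged, mirroring the ensembling described above.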
712
+ IV. EXPERIMENTS
713
+ In this section, we first describe the multicenter datasets
+ (i.e., the discovery dataset and two external datasets) and
+ implementation details, and then elaborate on the strategy we
+ use to generate PDAC segmentation masks. Subsequently we
717
+ present results on the discovery dataset in each step of our
718
+ method, including organ&vessel segmentation, attention map
719
+ generation, LN segmentation and identification, and LN metas-
720
+ tasis status prediction. Finally, external validation is conducted
721
+ to evaluate the generalization performance of LN metastasis
722
+ status prediction, with only pathology reports accessible in two
723
+ external datasets.
724
+ A. Experimental Settings
725
+ 1) Dataset:
726
+ We conduct a multicenter study on three
727
+ independent datasets with a total of 940 patients collected
728
+ from Changhai Hospital in Shanghai, Shengjing Hospital in
729
+ Liaoning Province, and Tianjin Cancer Hospital in Tianjin. All
730
+ patients had a pathologically confirmed diagnosis of PDAC,
731
+ and contrast-enhanced CT scans of arterial (A) and venous
732
+ (V) phases acquired before treatment were included in this
733
+ study. We labeled LNs on the dataset from Changhai Hospital
734
+ (denoted as Discovery dataset), and developed our model on
735
+ it using nested cross-validation (CV). The rest two datasets
736
+ from Shengjing Hospital and Tianjin Cancer Hospital (denoted
737
+ as Ext-validation dataset 1 and Ext-validation dataset 2) were
738
+ used as external validation sets with only pathologically diag-
739
+ nosed LN metastasis status provided. This study was reviewed
740
+ and approved by the Biomedical Research Ethics Committee
741
+ of the institution (No. CHEC2021164), and was performed in
742
+ accordance with the ethical standards of the 1964 Declaration
743
+ of Helsinki. The requirement for patient informed consent
744
+ was waived by the Institutional Review Board due to the
745
+ retrospective nature of the study and because all procedures
746
+ performed were part of routine care.
747
+ Discovery dataset contains CT scans of 749 patients,
748
+ among which there are 351 positive samples (patients with
749
+ LN metastasis) and 398 negative samples (patients without
750
+ LN metastasis). The annotation of LNs was performed by
751
+ two board-certified radiologists (XF with 7 and MZ with 5
752
+ years of experiences in pancreatic imaging) with referring to
753
+ pathology report under supervision of a senior radiologist (YB)
754
+ with 17 years of experiences in pancreatic imaging. There are
755
+ totally 2,467 labeled LNs, of which 476 are positive and the
756
+ rest are negative. In specific, 351 metastasis-positive patients
757
+ contain 476 labeled positive and 322 labeled negative LNs, and
758
+ 398 metastasis-negative patients have the rest 1,669 labeled
759
+ negative LNs.
760
+ This dataset was split using nested five-fold CV, with 64%,
761
+ 16% and 20% as training, validation and testing sets in each
762
+ CV round. As for the primary tumor, 163 patients among the
763
+ whole dataset were annotated with 3D tumor masks by two
764
+ radiologists (XF and MZ). We use these 163 patients as the
765
+ testing set and the remaining unlabeled 586 patients as the
766
+ training set for an annotation-efficient PDAC segmentation.
767
+ Additionally, we generate pseudo annotations of 17 classes
768
+ of organs and vessels using the self-learning segmentation
769
+ model described in [55], and manually annotate two extra
+ vessels (LGA, CHA&PHA) and extend two other vessels
771
+ (SMA and TC&SA) under the supervision of a radiologist
772
+ (XF) for 50 patients randomly sampled from our dataset. 40/10
773
+ of these patients are used as training and validation sets for
774
+ organ&vessel segmentation, respectively.
775
+ Ext-validation dataset 1 contains CT scans of 132 patients
776
+ with 39 positive and 93 negative patients; Ext-validation
777
+ dataset 2 contains 59 patients with 37 positive and 22 negative
778
+ patients. More detailed information on three datasets can be
779
+ seen in Table I.
780
+ 2) Implementation Details: In our experiments, CT images
781
+ of arterial phase are registered to venous phase using DEEDS
782
+ [56], and they are all resampled to a median spacing of 0.68
783
+ × 0.68 × 0.80 mm. For LN segmentation and organ&vessel
784
+ segmentation, sub-volumes of 160 × 192 × 80 are randomly
785
+ cropped as training patches. In the non-linear mapping from
786
+
787
+ 7
788
+ TABLE I
789
+ DEMOGRAPHIC DISTRIBUTIONS AND TUMOR CHARACTERISTICS IN THE THREE DATASETS (DISCOVERY DATASET, EXT-VALIDATION
790
+ DATASET 1 AND EXT-VALIDATION DATASET 2). MEDIAN [INTERQUARTILE RANGE, 25TH–75TH PERCENTILE] VALUES ARE REPORTED
791
+ FOR CONTINUOUS VARIABLES.
792
+ Characteristics
793
+ Discovery dataset
794
+ Ext-validation dataset 1
795
+ Ext-validation dataset 2
796
+ (n=749)
797
+ (n=132)
798
+ (n=59)
799
+ Gender, n (%)
800
+ Female
801
+ 282 (38%)
802
+ 60 (45%)
803
+ 28 (47%)
804
+ Male
805
+ 467 (62%)
806
+ 72 (55 %)
807
+ 31 (53%)
808
+ Age at Diagnosis, yrs
809
+ 63 [56-69]
810
+ 60 [53-65]
811
+ 58 [51-62]
812
+ pT Stage, n (%)
813
+ pT1 / pT2
814
+ 92 (12%) / 314 (42%)
815
+ 24 (18%)/ 80 (61%)
816
+ 10 (17%) / 31 (53%)
817
+ pT3 / pT4
818
+ 316 (42%) / 13 (2%)
819
+ 15 (11%)/ 13 (10%)
820
+ 5 (8%)/ 13 (22%)
821
+ Missing
822
+ 14 (2%)
823
+ 0 (0 %)
824
+ 0 (0 %)
825
+ pN Stage, n (%)
826
+ pN0
827
+ 398 (53%)
828
+ 93 (70%)
829
+ 22 (37%)
830
+ pN1
831
+ 242 (32%)
832
+ 32 (24%)
833
+ 22 (37%)
834
+ pN2
835
+ 109 (15%)
836
+ 7 (5%)
837
+ 15 (25%)
838
+ Tumor Size, cm
839
+ 3.0 [2.5-4.1]
840
+ 2.7 [2.2-3.0]
841
+ 2.9 [2.2-3.4]
842
+ Tumor Location, n (%)
843
+ Head / Uncinate
844
+ 475 (63%)
845
+ 56 (42%)/ 52 (39%)
846
+ 35 (59%)/ 22 (37%)
847
+ Body / Tail
848
+ 274 (37%)
849
+ 2 (2%)/ 22 (17%)
850
+ 2 (3%)/ 0 (0%)
851
+ Positive LN Volume, mm3
852
+ 665[210-804]
853
+ -
854
+ -
855
+ Negative LN Volume, mm3
856
+ 300[106-377]
857
+ -
858
+ -
859
+ distance maps to attention maps for our LN segmentation,
860
+ parameters of the mapping function are determined by group-
861
+ ing GT LN voxels according to which organ/vessel is closest
862
+ to, and calculating the minimum and maximum distances to
863
+ organ/vessel boundaries in each group. Parameters are listed
864
+ in Table II, in which negative values indicate voxels inside
865
+ organ/vessel.
866
+ TABLE II
+ PARAMETERS (I.E. dmin AND dmax) OF NON-LINEAR MAPPING FUNCTION FOR EACH ORGAN OR VESSEL. SMA: SUPERIOR MESENTERIC ARTERY.
+ TC&SA: TRUNCUS COELIACUS AND SPLENIC ARTERY. LGA: LEFT GASTRIC ARTERY; CHA&PHA: COMMON HEPATIC ARTERY AND PROPER HEPATIC ARTERY.
+ Organ/Vessel | dmin (mm) | dmax (mm)
+ Spleen       |  0        | 16
+ Esophagus    |  0        | 25
+ Stomach      | -2        | 18
+ Aorta        |  0        | 28
+ Pancreas     | -5        | 20
+ Duodenum     | -5        | 22
+ SMA          | -1        | 20
+ TC&SA        | -2        | 18
+ LGA          |  0        | 21
+ CHA&PHA      |  0        | 20
905
+ As for instance-wise LN identification, 3D image training
906
+ samples are generated by cropping a 96 × 96 × 80 sub-volume
907
+ centered at each GT LN. SGD optimizer with Nesterov
908
+ momentum (µ = 0.95) is adopted to train the network, whose
909
+ initial learning rate and weight decay are 5 × 10−4 and
910
+ 1 × 10−4, respectively. Furthermore, the final LN metastasis
911
+ status prediction model takes 2D inputs of 224 × 224 cen-
912
+ tered at PDAC, and is trained using the same optimizer as
913
+ above. Details of the network architecture are presented in the
914
+ supplementary material.
915
+ B. PDAC Segmentation Mask Acquisition/Harvesting
916
+ We employ an annotation-efficient strategy to generate
917
+ 3D masks of tumors for the labor cost reduction purpose.
918
+ Specifically, we start with the PDAC segmentation model
919
+ trained with arterial-late phase described in [46] to generate
920
+ pseudo annotations. Next, the model is fine-tuned under the
921
+ supervision of pseudo annotations and then applied to produce
922
+ segmentation masks on our dataset. To obtain the PDAC
923
+ segmentation model on venous phase, those segmentation
924
+ masks are registered to venous phase and are then used to
925
+ train a nnUNet model from scratch to generate the final 3D
926
+ masks of tumors. We evaluate the final PDAC segmentation
927
+ model on the labeled testing set. Median Dice score, average
928
+ surface distance (ASD, mm), and Hausdorff distance (HD, mm)
929
+ are 0.683, 2.186, and 12.805 respectively.
930
+ C. Evaluation of Organ&Vessel Segmentation and Attention
931
+ Maps
932
+ To evaluate the performance of organ&vessel segmentation,
933
+ a testing set of 19 randomly selected CT volumes with ten
934
+ classes of organ/vessel is manually annotated by a radiologist
935
+ (XF). To reduce the annotation burden, all dense CT volumes
936
+ are downsampled to 5mm in the slice thickness dimension. We
937
+ compare our self-learning model with the pseudo annotation
938
+ generator [55], which is able to segment eight of ten classes
939
+ (except for LGA and CHA&PHA) on single-phase CT. Dice
940
+ score, ASD (mm), and HD (mm) are adopted as the evaluation
941
+ metrics and the results are provided in Table III. Our model
942
+ that is trained on two phases outperforms [55] on seven
943
+ of eight organs/vessels. Note that SMA and TC&SA masks
944
+ segmented by [55] contain shorter parts compared with those
945
+ segmented by our model, therefore, resulting in significantly
946
+ lower performance than ours (0.331 lower Dice score in SMA,
947
+ and 0.171 lower in TC&SA).
948
950
+ TABLE III
+ QUANTITATIVE PERFORMANCE OF ORGAN&VESSEL SEGMENTATION. A: ARTERIAL. V: VENOUS. SMA: SUPERIOR MESENTERIC ARTERY.
+ TC&SA: TRUNCUS COELIACUS AND SPLENIC ARTERY. LGA: LEFT GASTRIC ARTERY; CHA&PHA: COMMON HEPATIC ARTERY AND PROPER HEPATIC ARTERY.
+ Organ/Vessel | Method | CT Phases | Dice  | ASD (mm) | HD (mm)
+ Spleen       | [55]   | A         | 0.938 | 0.643    | 14.252
+              | [55]   | V         | 0.954 | 0.422    | 11.107
+              | Ours   | A+V       | 0.959 | 0.384    | 8.129
+ Esophagus    | [55]   | A         | 0.557 | 0.936    | 13.897
+              | [55]   | V         | 0.598 | 0.854    | 11.003
+              | Ours   | A+V       | 0.745 | 0.641    | 8.125
+ Stomach      | [55]   | A         | 0.846 | 2.223    | 35.338
+              | [55]   | V         | 0.813 | 3.765    | 43.114
+              | Ours   | A+V       | 0.905 | 1.519    | 19.183
+ Aorta        | [55]   | A         | 0.893 | 0.519    | 8.130
+              | [55]   | V         | 0.924 | 0.417    | 6.158
+              | Ours   | A+V       | 0.920 | 0.359    | 5.863
+ Pancreas     | [55]   | A         | 0.712 | 2.905    | 25.880
+              | [55]   | V         | 0.756 | 1.897    | 19.258
+              | Ours   | A+V       | 0.847 | 0.975    | 12.859
+ Duodenum     | [55]   | A         | 0.613 | 2.976    | 34.187
+              | [55]   | V         | 0.665 | 3.366    | 32.174
+              | Ours   | A+V       | 0.764 | 1.892    | 29.131
+ SMA          | [55]   | A         | 0.387 | 0.663    | 68.869
+              | [55]   | V         | 0.415 | 0.710    | 68.098
+              | Ours   | A+V       | 0.746 | 0.860    | 28.840
+ TC&SA        | [55]   | A         | 0.563 | 0.780    | 43.974
+              | [55]   | V         | 0.407 | 1.245    | 51.432
+              | Ours   | A+V       | 0.734 | 0.305    | 22.224
+ LGA          | [55]   | A         | -     | -        | -
+              | [55]   | V         | -     | -        | -
+              | Ours   | A+V       | 0.651 | 0.371    | 10.420
+ CHA&PHA      | [55]   | A         | -     | -        | -
+              | [55]   | V         | -     | -        | -
+              | Ours   | A+V       | 0.715 | 1.424    | 24.239
1122
+ Qualitative examples of organ&vessel segmentation as well
+ as the corresponding attention maps are visualized
+ in Fig. 4 (b).
1125
+ D. Evaluation of Lymph Node Instance Segmentation and
1126
+ Identification
1127
+ TABLE IV
+ AVERAGE INSTANCE-WISE LN CLASSIFICATION PERFORMANCE ACROSS 5 FOLDS. THE RESULTS ARE REPORTED ON GT INSTANCES.
+ Metric            | Performance
+ AUC               | 0.854
+ Accuracy          | 0.789
+ Balanced accuracy | 0.771
+ Sensitivity       | 0.742
+ Specificity       | 0.800
1143
+ Quantitative Evaluation. LNs are first detected by the
1144
+ class-agnostic segmentation model, and then identified as
1145
+ positive/negative by applying the classification model on the
1146
+ cropped instances. For positive/negative LN identification, our
1147
+ classification model is trained with Ground-Truth (GT) LNs,
1148
+ yielding an average AUC of 0.854 across 5 folds (in Table IV).
1149
+ At inference, the automatically segmented LNs are cropped
1150
+ and then identified by the classification model. To evaluate the
1151
+ segmentation performance before and after identification, we
1152
+ compare our method with a strong baseline, nnUNet [27]. The
1153
+ segmentation accuracy is measured by voxel-wise metrics (i.e.,
1154
+ Dice, Recall, Precision) and instance-wise metrics (F-measure,
1155
+ Recall, Precision). For statistical analysis, we apply
1156
+ 1,000 iterations of Wilcoxon signed rank test to voxel-wise
1157
+ Dice and instance-wise F-measure. Results are provided in
1158
+ Table V. An instance is considered successfully detected if
1159
+ its intersection-over-union (IoU) score between the segmentation
1160
+ mask and GT mask is ≥ 30 %. Before identification, our
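+ The matching criterion behind the instance-wise metrics can be written down as below (a sketch with our own helper names, not an official evaluation script).
+ import numpy as np
+ from scipy.ndimage import label
+
+ def instance_scores(pred_mask, gt_mask, iou_thresh=0.3):
+     # a GT instance counts as detected if some predicted instance overlaps it with IoU >= 0.3
+     pred_lab, n_pred = label(pred_mask > 0)
+     gt_lab, n_gt = label(gt_mask > 0)
+     hit_gt, hit_pred = set(), set()
+     for g in range(1, n_gt + 1):
+         g_vox = gt_lab == g
+         for p in np.unique(pred_lab[g_vox]):
+             if p == 0:
+                 continue
+             p_vox = pred_lab == p
+             iou = np.logical_and(g_vox, p_vox).sum() / np.logical_or(g_vox, p_vox).sum()
+             if iou >= iou_thresh:
+                 hit_gt.add(g)
+                 hit_pred.add(int(p))
+     recall = len(hit_gt) / max(n_gt, 1)
+     precision = len(hit_pred) / max(n_pred, 1)
+     f_measure = 2 * precision * recall / max(precision + recall, 1e-6)
+     return recall, precision, f_measure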
1161
+ segmentation model significantly outperforms nnUNet on both
1162
+ voxel-wise and instance-wise metrics, with the voxel-wise
1163
+ Dice increasing from 45.9% to 47.7% and the instance-wise
1164
+ F-measure increasing from 36.1% to 40.6%, as shown in Table
1165
+ V. In addition, our model also yields superior performance in
1166
+ terms of both positive and negative LNs after identification,
1167
+ achieving 1.8% higher voxel-wise Dice and 1.8% higher
1168
+ instance-wise F-measure in terms of positive LNs, and 0.2%
1169
+ higher voxel-wise Dice and 1.2% higher instance-wise F-
1170
+ measure in terms of negative LNs. In total five out of six
1171
+ comparisons, our method is statistically significantly better or
1172
+ more accurate (i.e., with p-value < 0.05) in LN segmentation
1173
+ than the nnUNet baseline (implemented without the attention
1174
+ maps).
1175
+ Qualitative Evaluation. Examples of LN segmentation
1176
+ and identification results are shown in Fig. 4 (a) for qual-
1177
+ itative comparison. Our segmentation model leverages prior
1178
+ knowledge of LNs’ position distribution by incorporating the
1179
+ attention mechanism to remove false positives that are far
1180
+ from anatomically plausible LN areas. In Fig. 4 (a), we can
1181
+ observe that nnUNet tends to falsely detect an instance inside
1182
+ some organs or located very far, while our method provides
1183
+ noticeably less false positives.
1184
+ E. Evaluation of Patient-wise Lymph Node Metastasis Status
1185
+ Prediction
1186
+ Metrics. In this section, we evaluate various performance
1187
+ metrics of LN metastasis status prediction. For this binary
1188
+ classification problem, AUC, accuracy, balanced accuracy,
1189
+ sensitivity and specificity are adopted as evaluation metrics
1190
+ and the average results across 5 folds are reported. Statistical
1191
+ analysis is also carried out to verify the significance of
1192
+ performance improvement. We collect the predictions of all
1193
+ 5 folds, repeat 1,000 times of bootstrapping for calculating
1194
+ balanced accuracy, and apply Wilcoxon signed rank test to
1195
+ balanced accuracy distributions to compare our method with
1196
+ several other configurations. For comparing ROC curves,
1197
+ DeLong test is performed. P-values < 0.05 are considered
1198
+ as statistically significant. To compute 95% CI, the 2.5th
1199
+ percentile and 97.5th percentile are estimated after 1,000 times
1200
+ of bootstrapping.
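+ The bootstrap and Wilcoxon protocol could be scripted roughly as below; array names are hypothetical, and y_true/y_pred are NumPy arrays of patient-level labels and predictions pooled over the 5 folds.
+ import numpy as np
+ from sklearn.metrics import balanced_accuracy_score
+ from scipy.stats import wilcoxon
+
+ def bootstrap_balanced_acc(y_true, y_pred, n_boot=1000, seed=0):
+     rng = np.random.default_rng(seed)
+     idx = np.arange(len(y_true))
+     scores = []
+     for _ in range(n_boot):
+         s = rng.choice(idx, size=len(idx), replace=True)
+         scores.append(balanced_accuracy_score(y_true[s], y_pred[s]))
+     return np.array(scores)
+
+ # usage: ours = bootstrap_balanced_acc(y, y_hat_ours); base = bootstrap_balanced_acc(y, y_hat_base)
+ #        stat, p = wilcoxon(ours, base)              # paired test on the two distributions
+ #        ci = np.percentile(ours, [2.5, 97.5])       # 95% CI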
1201
+ Ablation Study. We first investigate the impact of each
1202
+ component in our framework. To evaluate the metastasis
1203
+ prediction performance of LN segmentation and identification,
1204
+ the results can be aggregated into patient-level prediction,
1205
+ based on the definition that a patient with at least one positive
1206
+ LN is metastasis-positive. However, due to a large number of
1207
+ false positives produced by segmentation (LN segmentation
1208
+ in CT images is challenging after all), it will lead to a poor
1209
+ performance if predicting metastasis simply based on the
1210
+
1211
+ TABLE V
+ PERFORMANCE COMPARISON ON LN SEGMENTATION BEFORE AND AFTER INSTANCE-WISE IDENTIFICATION (DENOTED AS Class-agnostic Seg AND
+ Class-aware Seg). POS AND NEG DENOTE POSITIVE AND NEGATIVE LNS. RESULTS ARE AVERAGED ACROSS 5 FOLDS. WILCOXON SIGNED RANK
+ TEST IS CONDUCTED ON VOXEL-WISE DICE AND INSTANCE-WISE F-MEASURE. * INDICATES p-VALUE < 0.05. NS INDICATES NO SIGNIFICANCE.
+ Stage                    | Class | Method | Voxel-wise Dice / Recall / Precision (%) | Instance-wise F-measure / Recall / Precision (%)
+ Class-agnostic Seg       | -     | nnUNet | 45.9* / 75.4 / 36.2                      | 36.1* / 81.0 / 25.3
+ (before identification)  | -     | Ours   | 47.7ref / 77.7 / 37.5                    | 40.6ref / 80.9 / 29.9
+ Class-aware Seg          | Pos   | nnUNet | 10.2* / 32.3 / 11.3                      | 11.7* / 36.1 / 12.0
+ (after identification)   | Pos   | Ours   | 12.0ref / 38.9 / 11.7                    | 13.5ref / 41.5 / 13.3
+                          | Neg   | nnUNet | 27.5NS / 51.1 / 25.4                     | 27.7* / 60.0 / 22.9
+                          | Neg   | Ours   | 27.7ref / 49.0 / 27.0                    | 28.9ref / 56.2 / 25.8
1277
+ Fig. 4. Examples of (a) LN segmentation and identification results, and (b) Organ&Vessel segmentation and attention map results.
1335
+ presence of positive LN in the segmentation results. We instead
1336
+ conduct ROC analysis on the volume of the largest positive
1337
+ LN in each case, and find an optimal threshold with the best
1338
+ balanced accuracy in the validation set. Then the threshold
1339
+ is applied to the testing set. A patient with positive LNs
+ larger than the threshold is classified as metastasis-positive;
+ otherwise, the patient is classified as metastasis-negative. The ablation
1342
+ models for consideration/comparison are listed as follows:
1343
+ • ClsfromPDAC: The straightforward strategy combining
1344
+ ResNet2D [29] feature and DeepTEN [54] feature, extracted
1345
1347
+ TABLE VI
1348
+ PERFORMANCE COMPARISON AND ABLATION STUDY ON LN METASTASIS STATUS PREDICTION OF DISCOVERY DATASET. RESULTS ARE
1349
+ AVERAGED ACROSS 5 FOLDS. WILCOXON SIGNED RANK TEST IS CONDUCTED ON BALANCED ACCURACY. * INDICATES p-VALUE <
1350
+ 0.05. NS INDICATES NO SIGNIFICANCE.
1351
+ Method
1352
+ Balanced Accuracy
1353
+ AUC
1354
+ Accuracy
1355
+ Sensitivity
1356
+ Specificity
1357
+ [95% CI]
1358
+ [95% CI]
1359
+ [95% CI]
1360
+ [95% CI]
1361
+ [95% CI]
1362
+ CT-reported LN status
1363
+ 0.599∗
1364
+ -
1365
+ 0.599
1366
+ 0.588
1367
+ 0.609
1368
+ [0.564-0.634]
1369
+ [0.565-0.634]
1370
+ [0.538-0.635]
1371
+ [0.558-0.657]
1372
+ Radiomics
1373
+ 0.597∗
1374
+ 0.648
1375
+ 0.603
1376
+ 0.508
1377
+ 0.686
1378
+ [0.563-0.633]
1379
+ [0.598-0.681]
1380
+ [0.569-0.637]
1381
+ [0.456-0.561]
1382
+ [0.638-0.734]
1383
+ Radiomics +
1384
+ 0.604∗
1385
+ 0.654
1386
+ 0.610
1387
+ 0.524
1388
+ 0.684
1389
+ CT-reported LN status
1390
+ [0.572-0.641]
1391
+ [0.612-0.692]
1392
+ [0.575-0.644]
1393
+ [0.470-0.581]
1394
+ [0.641-0.731]
1395
+ ResNet3D
1396
+ 0.562∗
1397
+ 0.609
1398
+ 0.554
1399
+ 0.599
1400
+ 0.524
1401
+ [0.521-0.593]
1402
+ [0.550-0.631]
1403
+ [0.519-0.587]
1404
+ [0.538-0.644]
1405
+ [0.475-0.568]
1406
+ ResNet2D
1407
+ 0.571∗
1408
+ 0.631
1409
+ 0.574
1410
+ 0.568
1411
+ 0.574
1412
+ [0.540-0.609]
1413
+ [0.590-0.667]
1414
+ [0.537-0.607]
1415
+ [0.519-0.624]
1416
+ [0.530-0.628]
1417
+ DeepTEN
1418
+ 0.588∗
1419
+ 0.634
1420
+ 0.593
1421
+ 0.609
1422
+ 0.566
1423
+ [0.560-0.628]
1424
+ [0.599-0.679]
1425
+ [0.559-0.628]
1426
+ [0.564-0.667]
1427
+ [0.520-0.621]
1428
+ ClsfromPDAC
1429
+ 0.599∗
1430
+ 0.654
1431
+ 0.597
1432
+ 0.600
1433
+ 0.597
1434
+ [0.558-0.634]
1435
+ [0.608-0.685]
1436
+ [0.561-0.633]
1437
+ [0.547-0.647]
1438
+ [0.550-0.646]
1439
+ ClsbyLNSeg w/o Attn
1440
+ 0.545∗
1441
+ 0.590
1442
+ 0.566
1443
+ 0.433
1444
+ 0.657
1445
+ [0.525-0.593]
1446
+ [0.548-0.625]
1447
+ [0.534-0.601]
1448
+ [0.393-0.499]
1449
+ [0.623-0.716]
1450
+ ClsbyLNSeg w/ Attn
1451
+ 0.563∗
1452
+ 0.603
1453
+ 0.572
1454
+ 0.351
1455
+ 0.775
1456
+ [0.530-0.594]
1457
+ [0.564-0.642]
1458
+ [0.539-0.605]
1459
+ [0.299-0.396]
1460
+ [0.731-0.814]
1461
+ Ours (ClsfromPDAC +
1462
+ 0.633ref
1463
+ 0.682
1464
+ 0.635
1465
+ 0.618
1466
+ 0.649
1467
+ ClsbyLNSeg w/ Attn)
1468
+ [0.599-0.669]
1469
+ [0.640-0.717]
1470
+ [0.601-0.669]
1471
+ [0.567-0.664]
1472
+ [0.603-0.696]
1473
+ Fig. 5. ROC curve comparison of (a) ablation study and (b) baseline models and our method using nested five-fold cross-validation in Discovery dataset.
1476
+ from PDAC slices, in the input of the classification layer.
1477
+ • ClsbyLNSeg w/o Attn: Patient-level metastasis aggregation
1478
+ from the results of LN segmentation without attention (i.e.
1479
+ nnUNet).
1480
+ • ClsbyLNSeg w/ Attn: Patient-level metastasis aggregation
1481
+ from the results of our proposed LN segmentation with
1482
+ attention.
1483
+ • ClsfromPDAC + ClsbyLNSeg w/ Attn: Combined model
1484
+ incorporating the volume of the largest positive LN given
1485
+ by ClsbyLNSeg w/ Attn into the classification layer of
1486
+ ClsfromPDAC.
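+ The ClsbyLNSeg-style aggregation used in the last three variants can be sketched as follows; val_scores/test_scores are NumPy arrays holding each patient's largest predicted-positive LN volume, and the helper names are ours.
+ import numpy as np
+ from sklearn.metrics import roc_curve, balanced_accuracy_score
+
+ def largest_positive_volume(positive_ln_volumes_mm3):
+     # patient-level score: volume of the largest LN identified as positive (0 if none)
+     return max(positive_ln_volumes_mm3) if len(positive_ln_volumes_mm3) else 0.0
+
+ def choose_threshold(val_scores, val_labels):
+     # pick the operating point with the best balanced accuracy on the validation folds
+     _, _, thresholds = roc_curve(val_labels, val_scores)
+     return max(thresholds,
+                key=lambda t: balanced_accuracy_score(val_labels, (val_scores >= t).astype(int)))
+
+ # test-time decision: metastasis-positive if the largest positive LN exceeds the frozen threshold
+ # y_pred_test = (test_scores >= threshold).astype(int)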
1487
+ The results of the ablation experiments are summarized in
1488
+ Table VI, and ROC analysis is illustrated in Fig. 5(a). By
1489
+ using only information about LNs, ClsbyLNSeg w/ Attn gives
1490
+ better aggregation results compared with ClsbyLNSeg w/o
1491
+ Attn (balanced accuracy 0.563 versus 0.545). Our final model
1492
+ (ClsfromPDAC + ClsbyLNSeg w/ Attn) significantly outper-
1493
+ forms the other three models with a balanced accuracy of 0.633
1494
+ (p-value < 0.05), which reveals the success of integrating both
1495
+ tumor and LNs imaging information for metastasis prediction.
1496
+ Comparison with Baselines. To validate the effective-
1497
+ ness of our method, radiomics model [8] and 2D/3D deep
1498
+ classification models are taken for comparison. To build the
1499
+ radiomics model, 1688 radiomics features of PDAC for each
1500
+ (Fig. 5(a) legend: ClsfromPDAC, AUC=0.654, p=0.038; ClsbyLNSeg w/o Attn, AUC=0.590, p<0.001; ClsbyLNSeg w/ Attn, AUC=0.603, p<0.001; ClsfromPDAC + ClsbyLNSeg w/ Attn, AUC=0.682, ref.)
+ (Fig. 5(b) legend: Radiomics, AUC=0.648, p=0.052; Radiomics+Radiologists, AUC=0.654, p=0.16; ResNet3D, AUC=0.609, p<0.0001; ResNet2D, AUC=0.631, p=0.0014; DeepTEN, AUC=0.634, p=0.0099; Ours, AUC=0.682, ref.)
1540
+ CT phase are extracted using the Pyradiomics package [57] (https://pyradiomics.readthedocs.io/), and
1541
+ then shrunk using the least absolute shrinkage and selection
1542
+ operator (LASSO) method. Then a logistic regression model
1543
+ is applied to the selected features. The combined model of
1544
+ radiomics and CT-reported LN status is implemented with a
1545
+ logistic regression model on radiomics signature and radiolo-
1546
+ gists’ diagnosis. For 2D deep networks, ResNet2D [29] and
1547
+ DeepTEN [54], we use ResNet-18 backbone pre-trained on
1548
+ ImageNet [52]; while for 3D deep networks, we adopt 3D-
1549
+ ResNet-18 [58] backbone pre-trained on Kinetics-700 [59]
1550
+ and Moments in Time [60]. In all of 2D/3D deep networks,
1551
+ a side branch with the PDAC mask as input is added to
1552
+ the backbone, as we implemented in our method, for fair
1553
+ comparison. Table VI and Fig. 5(b) present the quantitative
1554
+ results of different models. More importantly, our method
1555
+ yields the best balanced accuracy (0.633) among all compared
1556
+ methods, and is significantly better than the radiomics method
1557
+ and all of 2D/3D deep networks.
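+ For completeness, the radiomics pipeline compared against here can be approximated as below; the exact feature settings and hyper-parameters of [8] are not reproduced, so treat this only as a sketch.
+ import numpy as np
+ from sklearn.linear_model import LassoCV, LogisticRegression
+ from sklearn.preprocessing import StandardScaler
+
+ def fit_radiomics_model(X_train, y_train):
+     # X_train: (n_patients, n_features) matrix of pyradiomics features from both CT phases
+     scaler = StandardScaler().fit(X_train)
+     Xs = scaler.transform(X_train)
+     lasso = LassoCV(cv=5).fit(Xs, y_train)               # LASSO-based feature selection
+     selected = np.flatnonzero(lasso.coef_ != 0)
+     if selected.size == 0:                                # fall back to all features
+         selected = np.arange(Xs.shape[1])
+     clf = LogisticRegression(max_iter=1000).fit(Xs[:, selected], y_train)
+     return scaler, selected, clf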
1558
+ F. External Validation
1559
+ In this section, we demonstrate the generalization ability
1560
+ of our LN metastasis status prediction in two external multi-
1561
+ center datasets (i.e., Ext-validation dataset 1 and Ext-validation
1562
+ dataset 2). After training the model on Discovery dataset
1563
+ using cross validation, we apply the model to external datasets
1564
+ for inference. For each patient, the ensemble prediction is
1565
+ generated by averaging the model predictions from five folds.
1566
+ We first evaluate the performance of ablation variants, and
1567
+ then compare our method with baseline models. Metrics are
1568
+ used the same as Section IV-E.
1569
+ Ablation Study. We conduct ablation study on two external
1570
+ datasets, and results are shown in Table VII. With respect
1571
+ to LN metastasis status prediction using only LN-related
1572
+ information, our method (ClsbyLNSeg w/ Attn) outperforms
1573
+ nnUNet (ClsbyLNSeg w/o Attn) on both two datasets (bal-
1574
+ anced accuracy 0.589 versus 0.579 on Ext-validation dataset 1,
1575
+ 0.639 versus 0.607 on Ext-validation dataset 2). By integrating
1576
+ PDAC characteristics, our final model (ClsfromPDAC + Cls-
1577
+ byLNSeg w/ Attn) gives the best results among all ablation
1578
+ models (balanced accuracy 0.620 on Ext-validation dataset 1
1579
+ and 0.684 on Ext-validation dataset 2).
1580
+ Comparison with Baselines. Table VII validates the gener-
1581
+ alization performance of our method compared with radiomics
1582
+ and 2D/3D deep learning models. Note that we skip methods
1583
+ involved with CT-reported LN status since there is no CT
1584
+ report available in two external datasets. The radiomics model
1585
+ shows poor generalization ability with a large drop in perfor-
1586
+ mance compared with that in Table VI, while deep learning
1587
+ models are relatively more robust. Our method significantly
1588
+ surpasses all of the 2D/3D deep learning models with p-value <
1589
+ 0.05 on both external datasets (balanced accuracy 0.620 and
1590
+ 0.684 respectively), demonstrating the power of our model to
1591
+ generalize across different data sites.
1592
1593
+ V. DISCUSSION
1594
+ Pre-operative LN metastasis status prediction is of vital sig-
1595
+ nificance for PDAC patients in the following aspects. Firstly,
1596
+ if diagnosed with LN metastasis, patients with resectable
1597
+ PDAC are recommended to receive neoadjuvant therapy first
1598
+ before surgery, according to NCCN guidelines [61]. Secondly,
1599
+ pancreatectomy could be guided by whether and where their
1600
+ LNs have metastasized, that is, whether or not a standard or an
1601
+ extended lymphadenectomy should be performed. This could
1602
+ make the surgical procedure being more targeted beforehand
1603
+ which could lead to better patient outcome and avoid over-
1604
+ treatment. Thirdly, LN metastasis is highly associated with pa-
1605
+ tients’ survival, and therefore carries clear prognostic value
+ [55]. Note that it is very time-consuming, and highly dependent
+ on a board-certified radiologist’s experience and energy level,
+ to manually determine whether a patient has LN metastasis from
+ CT scans (even though it is a very desirable task for patient care). CT-reported LN status
1611
+ in this study shows limited performance with an accuracy of
1612
+ 0.599, thus accurate LN metastasis status prediction is highly
1613
+ desired.
1614
+ In the literature, LN metastasis status prediction has pre-
1615
+ dominantly been studied through tumor feature extraction,
1616
+ combined with CT report information, using radiomics [6]–
1617
+ [12] or deep learning approaches [13], [14], while the one
1618
+ leveraging LN radiomics requires manual delineation and
1619
+ considers simply the LN with the largest size [10]. An
1620
+ automated and accurate process of LN segmentation and
1621
+ nodal positivity identification is hence of high importance for
1622
+ assisting metastasis prediction. In [62], predicting the metastasis status
+ from automatically segmented LNs is formulated as detecting
+ metastatic LNs with Faster R-CNN; however, the spatial
+ context priors of LNs are not exploited. This work
1626
+ proposes an automated geometric attention mechanism using
1627
+ LN segmentation and identification to predict the patient-level
1628
+ status of LN metastasis.
1629
+ To demonstrate the effectiveness of our method, we pro-
1630
+ vide extensive quantitative experiments on LN segmenta-
1631
+ tion/identification and LN metastasis status prediction. Our
1632
+ LN segmentation model statistically significantly outperforms
1633
+ the strong baseline nnUNet in voxel-wise and instance-wise
1634
+ metrics. For LN instance-wise detection, our model achieves
1635
+ considerable quantitative improvements (4.6%) in precision
1636
+ (with respect to a similar recall level) as compared to nnUNet
1637
+ (see Table V). This observation clearly validates that the
1638
+ proposed distance-guided attention mechanism is beneficial
1639
+ to remove false positives as we expect. The success of our
1640
+ model can be attributed to its attention map design and
1641
+ informative negative selection scheme. The former defines the
1642
+ LN-plausible regions that deserve network’s focus, and the
1643
+ latter helps to throw out non-informative negative training
1644
+ patches accordingly. As such, it becomes more efficient to
1645
+ train and force the model to learn discriminative features from
1646
+ possible LN locations. To verify the effect of LN detection
1647
+ improvements on patient-level metastasis status prediction, we
1648
+ perform instance-wise positivity identification and patient-wise
1649
+ aggregation on the LN instances to classify the patients into
1650
1652
+ TABLE VII
1653
+ PERFORMANCE COMPARISON AND ABLATION STUDY ON LN METASTASIS STATUS PREDICTION OF TWO EXTERNAL DATASETS.
1654
+ PREDICTIONS ARE AVERAGED ACROSS 5 FOLDS. WILCOXON SIGNED RANK TEST IS CONDUCTED ON BALANCED ACCURACY. *
1655
+ INDICATES p-VALUE < 0.05. NS INDICATES NO SIGNIFICANCE.
1656
+ Dataset
1657
+ Method
1658
+ Balanced
1659
+ AUC
1660
+ Accuracy
1661
+ Sensitivity
1662
+ Specificity
1663
+ Accuracy
1664
+ [95% CI]
1665
+ [95% CI]
1666
+ [95% CI]
1667
+ [95% CI]
1668
+ [95% CI]
1669
+ Radiomics
1670
+ 0.493∗
1671
+ 0.511
1672
+ 0.672
1673
+ 0.051
1674
+ 0.935
1675
+ [0.451-0.537]
1676
+ [0.400-0.620]
1677
+ [0.626-0.710]
1678
+ [0.000-0.128]
1679
+ [0.880-0.978]
1680
+ ResNet3D
1681
+ 0.508∗
1682
+ 0.509
1683
+ 0.527
1684
+ 0.462
1685
+ 0.554
1686
+ [0.415-0.612]
1687
+ [0.409-0.617]
1688
+ [0.450-0.611]
1689
+ [0.308-0.615]
1690
+ [0.457-0.663]
1691
+ ResNet2D
1692
+ 0.563∗
1693
+ 0.564
1694
+ 0.542
1695
+ 0.615
1696
+ 0.511
1697
+ [0.470-0.656]
1698
+ [0.460-0.676]
1699
+ [0.466-0.626]
1700
+ [0.462-0.769]
1701
+ [0.413-0.609]
1702
+ DeepTEN
1703
+ 0.556∗
1704
+ 0.557
1705
+ 0.511
1706
+ 0.667
1707
+ 0.446
1708
+ Ext-validation
1709
+ [0.467-0.647]
1710
+ [0.454-0.666]
1711
+ [0.427-0.595]
1712
+ [0.513-0.795]
1713
+ [0.348-0.544]
1714
+ dataset 1
1715
+ ClsfromPDAC
1716
+ 0.515∗
1717
+ 0.554
1718
+ 0.485
1719
+ 0.590
1720
+ 0.441
1721
+ [0.423-0.609]
1722
+ [0.450-0.661]
1723
+ [0.402-0.568]
1724
+ [0.436-0.744]
1725
+ [0.344-0.548]
1726
+ ClsbyLNSeg w/o Attn
1727
+ 0.579∗
1728
+ 0.555
1729
+ 0.689
1730
+ 0.308
1731
+ 0.849
1732
+ [0.498-0.662]
1733
+ [0.454-0.641]
1734
+ [0.621-0.750]
1735
+ [0.179-0.462]
1736
+ [0.774-0.914]
1737
+ ClsbyLNSeg w/ Attn
1738
+ 0.589∗
1739
+ 0.580
1740
+ 0.705
1741
+ 0.308
1742
+ 0.871
1743
+ [0.511-0.672]
1744
+ [0.474-0.694]
1745
+ [0.644-0.765]
1746
+ [0.154-0.462]
1747
+ [0.796-0.935]
1748
+ Ours (ClsfromPDAC +
1749
+ 0.620ref
1750
+ 0.603
1751
+ 0.674
1752
+ 0.487
1753
+ 0.753
1754
+ ClsbyLNSeg w/ Attn)
1755
+ [0.538-0.713]
1756
+ [0.498-0.712]
1757
+ [0.598-0.742]
1758
+ [0.333-0.641]
1759
+ [0.667-0.839]
1760
+ Radiomics
1761
+ 0.508∗
1762
+ 0.609
1763
+ 0.441
1764
+ 0.243
1765
+ 0.773
1766
+ [0.391-0.626]
1767
+ [0.452-0.757]
1768
+ [0.339-0.542]
1769
+ [0.108-0.378]
1770
+ [0.591-0.909]
1771
+ ResNet3D
1772
+ 0.461∗
1773
+ 0.442
1774
+ 0.475
1775
+ 0.514
1776
+ 0.409
1777
+ [0.334-0.584]
1778
+ [0.300-0.593]
1779
+ [0.356-0.594]
1780
+ [0.351-0.676]
1781
+ [0.182-0.591]
1782
+ ResNet2D
1783
+ 0.681∗
1784
+ 0.687
1785
+ 0.650
1786
+ 0.577
1787
+ 0.786
1788
+ [0.536-0.810]
1789
+ [0.508-0.849]
1790
+ [0.500-0.800]
1791
+ [0.385-0.731]
1792
+ [0.571-0.930]
1793
+ DeepTEN
1794
+ 0.613∗
1795
+ 0.647
1796
+ 0.640
1797
+ 0.697
1798
+ 0.529
1799
+ Ext-validation
1800
+ [0.465-0.747]
1801
+ [0.474-0.806]
1802
+ [0.520-0.760]
1803
+ [0.545-0.848]
1804
+ [0.294-0.765]
1805
+ dataset 2
1806
+ ClsfromPDAC
1807
+ 0.620∗
1808
+ 0.639
1809
+ 0.593
1810
+ 0.514
1811
+ 0.727
1812
+ [0.493-0.734]
1813
+ [0.490-0.781]
1814
+ [0.475-0.712]
1815
+ [0.378-0.676]
1816
+ [0.545-0.909]
1817
+ ClsbyLNSeg w/o Attn
1818
+ 0.607∗
1819
+ 0.690
1820
+ 0.542
1821
+ 0.351
1822
+ 0.864
1823
+ [0.503-0.716]
1824
+ [0.554-0.818]
1825
+ [0.441-0.661]
1826
+ [0.216-0.487]
1827
+ [0.682-1.000]
1828
+ ClsbyLNSeg w/ Attn
1829
+ 0.639∗
1830
+ 0.695
1831
+ 0.593
1832
+ 0.459
1833
+ 0.818
1834
+ [0.525-0.752]
1835
+ [0.552-0.833]
1836
+ [0.475-0.695]
1837
+ [0.297-0.622]
1838
+ [0.636-0.955]
1839
+ Ours (ClsfromPDAC +
1840
+ 0.684ref
1841
+ 0.703
1842
+ 0.661
1843
+ 0.595
1844
+ 0.773
1845
+ ClsbyLNSeg w/ Attn)
1846
+ [0.570-0.797]
1847
+ [0.554-0.846]
1848
+ [0.542-0.780]
1849
+ [0.432-0.757]
1850
+ [0.591-0.909]
1851
+ metastasis-positive/-negative, and our model presents better
1852
+ prediction performance than nnUNet (balanced accuracy 0.563
1853
+ versus 0.545, Table VI). We further combine the results
1854
+ with tumor CT imaging characteristics and our final pre-
1855
+ diction model achieves statistically significant performance
1856
+ gains compared to radiomics methods and other deep 2D/3D
1857
+ models (see Table VI), which demonstrates the success and
1858
+ effectiveness of integrating tumor morphology and lymphatic
1859
+ anatomy. It is worth noting that our method achieves sta-
1860
+ tistically significant improvement (balanced accuracy 0.633
1861
+ versus 0.604) compared to the approach even with radiologists
1862
+ involved in “Radiomics + CT-reported LN status” in Table
1863
+ VI. Nevertheless, using our method, this time-consuming,
1864
+ subjective and highly challenging manual process of CT-
1865
+ reported LN status can be fully automated. External multi-
1866
+ center clinical validation is further conducted on extra two
1867
+ datasets from different hospitals, and the results evidently
1868
+ exhibit our superior performance accuracy and generalization
1869
+ ability with the best results (balanced accuracy 0.620 and
1870
+ 0.684 on the two external datasets) among several compared
1871
+ models (see Table VII). With all above-mentioned experi-
1872
+ ments, our model reports highly generalized prediction per-
1873
+ formance (0.620∼0.684) on multi-center datasets and robust
1874
+ improvements over CT-reported LN status (0.599) as well as
1875
+ radiomics and deep learning models, which clearly clarifies
1876
+ the advantage and stability of our model.
1877
+ Although recent work [8], [11] report exceedingly high
1878
+ accuracy (AUC > 0.9), they use small datasets of < 200
1879
+ patients, which would be subject to overfitting. Another recent
1880
+ progress in gastric cancer [12] enrolls over 500 patients from
1881
+ multiple hospitals, and yields noticeably lower but probably
1882
+ more reliable AUC score of 0.615∼0.712 in validation using
1883
+ 2D/2.5D/3D radiomics features and under different patient
1884
+ splits. [12] is probably more suitable to serve as reference
1885
+ baseline for our work. We employ 940 patients in total in this
1886
+ study, in which 749 patients are from a high-volume pancreatic
1887
+ cancer clinical center and 191 are from two external centers.
1888
+ The studied patient population is arguably much closer to
+ real-world patient data distributions compared to [8], [11],
+ and similar to [12]. We present a very promising
1891
+ approach that explicitly explores the role of automated LN
1892
+ segmentation in promoting LN metastasis status prediction
1893
+ to facilitate future clinical adoption as a fully-automated and
1894
+ generalizable clinical tool. One limitation of our framework
1895
1897
+ lies in the intuitive but simple solution that extracts tumor
1898
+ and LNs imaging information separately and then integrates
1899
+ them by feature concatenation, which does not fully exploit
1900
+ the nature of interactions between tumor and cancer cells
1901
+ in LNs. This work could be further improved in the future
1902
+ by designing an enriched deep learning geometric network
1903
+ representation to encode the tumor-LN topology information
1904
+ and spatial anatomical interactions, by modeling the pathways
1905
+ of nodal metastasis explicitly.
1906
+ Last, without loss of generality, our proposed method is
1907
+ applicable for finding the preoperative LN metastasis status of
1908
+ other types of solid tumor or cancers, such as liver or gastric
1909
+ cancers. We leave this as future work.
1910
+ VI. CONCLUSION
1911
+ We present an attention based LN segmentation network and
1912
+ utilize it on predicting LN metastasis in patients with PDAC.
1913
+ The proposed LN segmentation network involves an attention
1914
+ mechanism that encourages the network to focus on regions
1915
+ around certain anatomical organs/vessels. It outperforms the
1916
+ strong baseline nnUNet [27] by leveraging the context infor-
1917
+ mation of surrounding anatomical structures. Our segmenta-
1918
+ tion model, followed by a nodal positivity identification model,
1919
+ can serve as a single predictor for LN metastasis. Combined
1920
+ with tumor imaging characteristics, we further build a compos-
1921
+ itive LN metastasis status prediction model that is validated
1922
+ to surpass the CT-reported results, radiomics based method,
1923
+ and other 2D/3D deep learning models. Further investigations
1924
+ include conceiving a more sophisticated way to encode tumor-
1925
+ LN relationship and exploring its applications to prognosis and
1926
+ treatment planning in cancer patient management.
1927
+ ACKNOWLEDGMENTS
1928
+ This work was supported in part by the National
1937
+ Science Foundation for Scientists of China (81871352,
1938
+ 82171915, and 82171930), Clinical Research Plan of SHDC
1939
+ (SHDC2020CR4073), 234 Platform Discipline Consolidation
1940
+ Foundation Project (2019YPT001, 2020YPT001), and The
1941
+ Natural Science Foundation of Shanghai Science and Technol-
1942
+ ogy Innovation Action Plan (21ZR1478500, 21Y11910300).
1943
+ REFERENCES
1944
+ [1] R. L. Siegel, K. D. Miller, H. E. Fuchs, and A. Jemal, “Cancer statistics,
1945
+ 2021.” CA: a cancer journal for clinicians, vol. 71, no. 1, pp. 7–33,
1946
+ 2021.
1947
+ [2] A. J. Grossberg, L. C. Chu, C. R. Deig, E. K. Fishman, W. L. Hwang,
1948
+ A. Maitra, D. L. Marks, A. Mehta, N. Nabavizadeh, D. M. Simeone
1949
+ et al., “Multidisciplinary standards of care and recent progress in pan-
1950
+ creatic ductal adenocarcinoma,” CA: a Cancer Journal for Clinicians,
1951
+ vol. 70, no. 5, pp. 375–403, 2020.
1952
+ [3] C. L. Roland, A. D. Yang, M. H. Katz, D. Chatterjee, H. Wang, H. Lin,
1953
+ J. N. Vauthey, P. W. Pisters, G. R. Varadhachary, R. A. Wolff et al.,
1954
+ “Neoadjuvant therapy is associated with a reduced lymph node ratio in
1955
+ patients with potentially resectable pancreatic cancer,” Annals of surgical
1956
+ oncology, vol. 22, no. 4, pp. 1168–1175, 2015.
1957
+ [4] M. Kanda, T. Fujii, S. Nagai, Y. Kodera, A. Kanzaki, T. T. Sahin,
1958
+ M. Hayashi, S. Yamada, H. Sugimoto, S. Nomoto et al., “Pattern of
1959
+ lymph node metastasis spread in pancreatic cancer,” Pancreas, vol. 40,
1960
+ no. 6, pp. 951–955, 2011.
1961
+ [5] D. S. Tseng, H. C. van Santvoort, S. Fegrachi, M. G. Besselink, N. P.
1962
+ Zuithoff, I. H. B. Rinkes, M. S. van Leeuwen, and I. Q. Molenaar,
1963
+ “Diagnostic accuracy of ct in assessing extra-regional lymphadenopathy
1964
+ in pancreatic and peri-ampullary cancer: a systematic review and meta-
1965
+ analysis,” Surgical oncology, vol. 23, no. 4, pp. 229–235, 2014.
1966
+ [6] G.-W. Ji, Y.-D. Zhang, H. Zhang, F.-P. Zhu, K. Wang, Y.-X. Xia, Y.-
1967
+ D. Zhang, W.-J. Jiang, X.-C. Li, and X.-H. Wang, “Biliary tract cancer
1968
+ at ct: a radiomics-based model to predict lymph node metastasis and
1969
+ survival outcomes,” Radiology, vol. 290, no. 1, pp. 90–98, 2019.
1970
+ [7] Y. Wang, W. Liu, Y. Yu, J.-j. Liu, H.-d. Xue, Y.-f. Qi, J. Lei, J.-c. Yu,
1971
+ and Z.-y. Jin, “Ct radiomics nomogram for the preoperative prediction of
1972
+ lymph node metastasis in gastric cancer,” European radiology, vol. 30,
1973
+ no. 2, pp. 976–986, 2020.
1974
+ [8] K. Li, Q. Yao, J. Xiao, M. Li, J. Yang, W. Hou, M. Du, K. Chen, Y. Qu,
1975
+ L. Li et al., “Contrast-enhanced ct radiomics for predicting lymph node
1976
+ metastasis in pancreatic ductal adenocarcinoma: a pilot study,” Cancer
1977
+ Imaging, vol. 20, no. 1, pp. 1–10, 2020.
1978
+ [9] Y. Bian, S. Guo, H. Jiang, S. Gao, C. Shao, K. Cao, X. Fang, J. Li,
1979
+ L. Wang, W. Hua et al., “Relationship between radiomics and risk of
1980
+ lymph node metastasis in pancreatic ductal adenocarcinoma,” Pancreas,
1981
+ vol. 48, no. 9, p. 1195, 2019.
1982
+ [10] J. Yang, Q. Wu, L. Xu, Z. Wang, K. Su, R. Liu, E. A. Yen, S. Liu, J. Qin,
1983
+ Y. Rong et al., “Integrating tumor and nodal radiomics to predict lymph
1984
+ node metastasis in gastric cancer,” Radiotherapy and Oncology, vol. 150,
1985
+ pp. 89–96, 2020.
1986
+ [11] J. Gao, F. Han, Y. Jin, X. Wang, and J. Zhang, “A radiomics nomogram
1987
+ for the preoperative prediction of lymph node metastasis in pancreatic
1988
+ ductal adenocarcinoma,” Frontiers in oncology, vol. 10, p. 1654, 2020.
1989
+ [12] L. Meng, D. Dong, X. Chen, M. Fang, R. Wang, J. Li, Z. Liu, and
1990
+ J. Tian, “2d and 3d ct radiomic features performance comparison in
1991
+ characterization of gastric cancer: a multi-center study,” IEEE journal
1992
+ of biomedical and health informatics, vol. 25, no. 3, pp. 755–763, 2020.
1993
+ [13] C. Jin, Y. Jiang, H. Yu, W. Wang, B. Li, C. Chen, Q. Yuan, Y. Hu,
1994
+ Y. Xu, Z. Zhou et al., “Deep learning analysis of the primary tumour
1995
+ and the prediction of lymph node metastases in gastric cancer,” British
1996
+ Journal of Surgery, vol. 108, no. 5, pp. 542–549, 2021.
1997
+ [14] D. Dong, M.-J. Fang, L. Tang, X.-H. Shan, J.-B. Gao, F. Giganti, R.-P.
1998
+ Wang, X. Chen, X.-X. Wang, D. Palumbo et al., “Deep learning radiomic
1999
+ nomogram can predict the number of lymph node metastasis in locally
2000
+ advanced gastric cancer: an international multicenter study,” Annals of
2001
+ Oncology, vol. 31, no. 7, pp. 912–920, 2020.
2002
+ [15] S. H. Kim, B.-I. Song, B. W. Kim, H. W. Kim, K. S. Won, S. U. Bae,
2003
+ W. K. Jeong, and S. K. Baek, “Predictive value of [18f] fdg pet/ct for
2004
+ lymph node metastasis in rectal cancer,” Scientific Reports, vol. 9, no. 1,
2005
+ pp. 1–7, 2019.
2006
+ [16] A. Asagi, K. Ohta, J. Nasu, M. Tanada, S. Nadano, R. Nishimura,
2007
+ N. Teramoto, K. Yamamoto, T. Inoue, and H. Iguchi, “Utility of contrast-
2008
+ enhanced fdg-pet/ct in the clinical management of pancreatic cancer:
2009
+ impact on diagnosis, staging, evaluation of treatment response, and
2010
+ detection of recurrence,” Pancreas, vol. 42, no. 1, pp. 11–19, 2013.
2011
+ [17] H. Dahmarde, F. Parooie, and M. Salarzaei, “Is 18f-fdg pet/ct an accurate
2012
+ way to detect lymph node metastasis in colorectal cancer: a systematic
2013
+ review and meta-analysis,” Contrast Media & Molecular Imaging, vol.
2014
+ 2020, 2020.
2015
+ [18] D. S. Tseng, B. K. Pranger, M. S. van Leeuwen, J. P. Pennings, L. A.
2016
+ Brosens, N. H. Mohammad, V. E. de Meijer, H. C. van Santvoort,
2017
+ J. I. Erdmann, and I. Q. Molenaar, “The role of ct in assessment of
2018
+ extraregional lymph node involvement in pancreatic and periampullary
2019
+ cancer: A diagnostic accuracy study,” Radiology: Imaging Cancer,
2020
+ vol. 3, no. 2, 2021.
2021
+ [19] W. Jung, K. R. Park, K.-J. Lee, K. Kim, J. Lee, S. Jeong, Y.-J. Kim,
2022
+ J. Kim, H.-J. Yoon, B.-C. Kang et al., “Value of imaging study in
2023
+ predicting pelvic lymph node metastases of uterine cervical cancer,”
2024
+ Radiation Oncology Journal, vol. 35, no. 4, p. 340, 2017.
2025
+ [20] Japan Pancreas Society, Classification of Pancreas Carcinoma (Fourth
2026
+ English Edition).
2027
+ Kanehara & Co., 2017.
2028
+ [21] E. A. Eisenhauer, P. Therasse, J. Bogaerts, L. H. Schwartz, D. Sargent,
2029
+ R. Ford, J. Dancey, S. Arbuck, S. Gwyther, M. Mooney et al., “New
2030
+ response evaluation criteria in solid tumours: revised recist guideline
2031
+ (version 1.1),” European J. of cancer, vol. 45, no. 2, pp. 228–247, 2009.
2032
+ [22] H. Oda, H. R. Roth, K. K. Bhatia, M. Oda, T. Kitasaka, S. Iwano,
2033
+ H. Homma, H. Takabatake, M. Mori, H. Natori et al., “Dense volumetric
2034
+ detection and segmentation of mediastinal lymph nodes in chest ct
2035
+ images,” in Medical Imaging 2018: Computer-Aided Diagnosis, 2018.
2036
+ [23] D. Bouget, A. Jørgensen, G. Kiss, H. O. Leira, and T. Langø, “Semantic
2037
+ segmentation and detection of mediastinal lymph nodes and anatomical
2038
+
2039
+ 14
2040
+ structures in ct data for lung cancer staging,” Int. J. of computer assisted
2041
+ radiology and surgery, vol. 14, no. 6, pp. 977–986, 2019.
2042
+ [24] D. Guo, X. Ye, J. Ge, X. Di, L. Lu, L. Huang, G. Xie, J. Xiao, Z. Lu,
2043
+ L. Peng et al., “Deepstationing: thoracic lymph node station parsing in
2044
+ ct scans using anatomical context encoding and key organ auto-search,”
2045
+ in MICCAI, 2021, pp. 3–12.
2046
+ [25] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks
2047
+ for biomedical image segmentation,” in MICCAI, 2015, pp. 234–241.
2048
+ [26]
2049
+ Ö. Çiçek, A. Abdulkadir, S. S. Lienkamp, T. Brox, and O. Ronneberger,
2050
+ “3d u-net: learning dense volumetric segmentation from sparse annota-
2051
+ tion,” in MICCAI, 2016, pp. 424–432.
2052
+ [27] F. Isensee, P. F. Jaeger, S. A. Kohl, J. Petersen, and K. H. Maier-Hein,
2053
+ “nnu-net: a self-configuring method for deep learning-based biomedical
2054
+ image segmentation,” Nature methods, vol. 18, no. 2, pp. 203–211, 2021.
2055
+ [28] D. Bouget, A. Pedersen, J. Vanel, H. O. Leira, and T. Langø, “Mediasti-
2056
+ nal lymph nodes segmentation using 3d convolutional neural network
2057
+ ensembles and anatomical priors guiding,” arXiv:2102.06515, 2021.
2058
+ [29] K. He, X. Zhang, S. Ren, and J. Sun, “Deep residual learning for image
2059
+ recognition,” in IEEE CVPR, 2016, pp. 770–778.
2060
+ [30] M. Feuerstein, B. Glocker, T. Kitasaka, Y. Nakamura, S. Iwano, and
2061
+ K. Mori, “Mediastinal atlas creation from 3-d chest computed tomogra-
2062
+ phy images: application to automated detection and station mapping of
2063
+ lymph nodes,” Medical image analysis, vol. 16, no. 1, pp. 63–74, 2012.
2064
+ [31] J. Liu, J. Hoffman, J. Zhao, J. Yao, L. Lu, L. Kim, E. B. Turkbey,
2065
+ and R. M. Summers, “Mediastinal lymph node detection and station
2066
+ mapping on chest ct using spatial priors and random forest,” Medical
2067
+ physics, vol. 43, no. 7, pp. 4362–4374, 2016.
2068
+ [32] J. Liu, J. Zhao, J. Hoffman, J. Yao, W. Zhang, E. B. Turkbey, S. Wang,
2069
+ C. Kim, and R. M. Summers, “Mediastinal lymph node detection on
2070
+ thoracic ct scans using spatial prior from multi-atlas label fusion,” in
2071
+ Medical Imaging 2014: Computer-Aided Diagnosis, vol. 9035, 2014, p.
2072
+ 90350M.
2073
+ [33] H. Oda, K. K. Bhatia, M. Oda, T. Kitasaka, S. Iwano, H. Homma,
2074
+ H. Takabatake, M. Mori, H. Natori, J. A. Schnabel et al., “Hessian-
2075
+ assisted supervoxel: structure-oriented voxel clustering and application
2076
+ to mediastinal lymph node detection from ct volumes,” in Medical
2077
+ Imaging 2017: Computer-Aided Diagnosis, 2017.
2078
+ [34] H. Seo, C. Huang, M. Bassenne, R. Xiao, and L. Xing, “Modified u-net
2079
+ (mu-net) with incorporation of object-dependent high level features for
2080
+ improved liver and liver-tumor segmentation in ct images,” IEEE trans.
2081
+ on medical imaging, vol. 39, no. 5, pp. 1316–1325, 2019.
2082
+ [35] C. Huang, H. Han, Q. Yao, S. Zhu, and S. K. Zhou, “3d u2-net: a
2083
+ 3d universal u-net for multi-domain medical image segmentation,” in
2084
+ MICCAI, 2019, pp. 291–299.
2085
+ [36] S. Kazemifar, A. Balagopal, D. Nguyen, S. McGuire, R. Hannan,
2086
+ S. Jiang, and A. Owrangi, “Segmentation of the prostate and organs at
2087
+ risk in male pelvic ct images using deep learning,” Biomedical Physics
2088
+ & Engineering Express, vol. 4, no. 5, p. 055003, 2018.
2089
+ [37] O. Oktay, J. Schlemper, L. L. Folgoc, M. Lee, M. Heinrich, and
2090
+ et al., “Attention u-net: Learning where to look for the pancreas,”
2091
+ arXiv:1804.03999, 2018.
2092
+ [38] S. E. Gerard and J. M. Reinhardt, “Pulmonary lobe segmentation using
2093
+ a sequence of convolutional neural networks for marginal learning,” in
2094
+ IEEE ISBI, 2019, pp. 1207–1211.
2095
+ [39] K. He, G. Gkioxari, P. Dollár, and R. Girshick, “Mask r-cnn,” in IEEE
2096
+ ICCV, 2017, pp. 2961–2969.
2097
+ [40] V. Kumar, Y. Gu, S. Basu, A. Berglund, S. A. Eschrich, M. B. Schabath,
2098
+ K. Forster, H. J. Aerts, A. Dekker, D. Fenstermacher et al., “Radiomics:
2099
+ the process and the challenges,” Magnetic resonance imaging, vol. 30,
2100
+ no. 9, pp. 1234–1248, 2012.
2101
+ [41] R. J. Gillies, P. E. Kinahan, and H. Hricak, “Radiomics: images are more
2102
+ than pictures, they are data,” Radiology, vol. 278, no. 2, pp. 563–577,
2103
+ 2016.
2104
+ [42] P. Lambin, E. Rios-Velazquez, R. Leijenaar, S. Carvalho, R. G.
2105
+ Van Stiphout, P. Granton, C. M. Zegers, R. Gillies, R. Boellard,
2106
+ A. Dekker et al., “Radiomics: extracting more information from medical
2107
+ images using advanced feature analysis,” European journal of cancer,
2108
+ vol. 48, no. 4, pp. 441–446, 2012.
2109
+ [43] N.-M. Cheng, J. Yao, J. Cai, X. Ye, S. Zhao, K. Zhao, W. Zhou,
2110
+ I. Nogues, Y. Huo, C.-T. Liao, H.-M. Wang, C.-Y. Lin, L.-Y. Lee, J. Xiao,
2111
+ L. Lu, L. Zhang, and T.-C. Yen, “Deep learning for fully-automated
2112
+ prediction of overall survival in patients with oropharyngeal cancer using
2113
+ fdg pet imaging: an international retrospective study,” Clinical Cancer
2114
+ Research, vol. 27, no. 14, pp. 3948–3959, 2021.
2115
+ [44] Y. Xu, A. Hosny, R. Zeleznik, C. Parmar, T. Coroller, I. Franco, R. H.
2116
+ Mak, and H. J. Aerts, “Deep learning predicts lung cancer treatment
2117
+ response from serial medical imaging,” Clinical Cancer Research,
2118
+ vol. 25, no. 11, pp. 3266–3275, 2019.
2119
+ [45] Y. Xia, J. Yao, L. Lu, L. Huang, G. Xie, J. Xiao, A. Yuille, K. Cao,
2120
+ and L. Zhang, “Effective pancreatic cancer screening on non-contrast ct
2121
+ scans via anatomy-aware transformers,” in MICCAI, 2021, pp. 259–269.
2122
+ [46] T. Zhao, K. Cao, J. Yao, I. Nogues, L. Lu, L. Huang, J. Xiao,
2123
+ Z. Yin, and L. Zhang, “3d graph anatomy geometry-integrated network
2124
+ for pancreatic mass segmentation, diagnosis, and quantitative patient
2125
+ management,” in IEEE CVPR, 2021, pp. 13 743–13 752.
2126
+ [47] J. Yao, Y. Shi, L. Lu, J. Xiao, and L. Zhang, “Deepprognosis: Preop-
2127
+ erative prediction of pancreatic cancer survival and surgical margin via
2128
+ contrast-enhanced ct imaging,” in MICCAI, 2020, pp. 272–282.
2129
+ [48] X. Zheng, Z. Yao, Y. Huang, Y. Yu, Y. Wang, Y. Liu, R. Mao, F. Li,
2130
+ Y. Xiao, Y. Wang et al., “Deep learning radiomics can predict axillary
2131
+ lymph node status in early-stage breast cancer,” Nature communications,
2132
+ vol. 11, no. 1, pp. 1–9, 2020.
2133
+ [49] S. A. Harmon, T. H. Sanford, G. T. Brown, C. Yang, S. Mehralivand,
2134
+ J. M. Jacob, V. A. Valera, J. H. Shih, P. K. Agarwal, P. L. Choyke et al.,
2135
+ “Multiresolution application of artificial intelligence in digital pathology
2136
+ for prediction of positive lymph nodes from primary tumors in bladder
2137
+ cancer,” JCO clinical cancer informatics, vol. 4, pp. 367–382, 2020.
2138
+ [50] G. Huang, Z. Liu, L. Van Der Maaten, and K. Q. Weinberger, “Densely
2139
+ connected convolutional networks,” in IEEE CVPR, 2017, pp. 4700–
2140
+ 4708.
2141
+ [51] S. S. M. Salehi, D. Erdogmus, and A. Gholipour, “Tversky loss function
2142
+ for image segmentation using 3d fully convolutional deep networks,” in
2143
+ MLMI, 2017, pp. 379–387.
2144
+ [52] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei, “Imagenet:
2145
+ A large-scale hierarchical image database,” in IEEE CVPR. Ieee, 2009,
2146
+ pp. 248–255.
2147
+ [53] S. Eppel, “Classifying a specific image region using convolutional nets
2148
+ with an roi mask as input,” arXiv:1812.00291, 2018.
2149
+ [54] H. Zhang, J. Xue, and K. Dana, “Deep ten: Texture encoding network,”
2150
+ in IEEE CVPR, 2017, pp. 708–717.
2151
+ [55] J. Yao, Y. Shi, K. Cao, L. Lu, J. Lu, Q. Song, G. Jin, J. Xiao, Y. Hou,
2152
+ and L. Zhang, “Deepprognosis: Preoperative prediction of pancreatic
2153
+ cancer survival and surgical margin via comprehensive understanding
2154
+ of dynamic contrast-enhanced ct imaging and tumor-vascular contact
2155
+ parsing,” Medical image analysis, vol. 73, p. 102150, 2021.
2156
+ [56] M. P. Heinrich, M. Jenkinson, B. W. Papież, S. M. Brady, and J. A.
2157
+ Schnabel, “Towards realtime multimodal fusion for image-guided inter-
2158
+ ventions using self-similarities,” in MICCAI, 2013, pp. 187–194.
2159
+ [57] J. J. Van Griethuysen, A. Fedorov, C. Parmar, A. Hosny, N. Aucoin,
2160
+ V. Narayan, R. G. Beets-Tan, J.-C. Fillion-Robin, S. Pieper, and H. J.
2161
+ Aerts, “Computational radiomics system to decode the radiographic
2162
+ phenotype,” Cancer research, vol. 77, no. 21, pp. e104–e107, 2017.
2163
+ [58] K. Hara, H. Kataoka, and Y. Satoh, “Can spatiotemporal 3d cnns retrace
2164
+ the history of 2d cnns and imagenet?” in IEEE CVPR, 2018, pp. 6546–
2165
+ 6555.
2166
+ [59] W. Kay, J. Carreira, K. Simonyan, and et al., “The kinetics human action
2167
+ video dataset,” arXiv:1705.06950, 2017.
2168
+ [60] M. Monfort, A. Andonian, B. Zhou, K. Ramakrishnan, S. A. Bargal,
2169
+ T. Yan, L. Brown, Q. Fan, D. Gutfreund, C. Vondrick et al., “Moments
2170
+ in time dataset: one million videos for event understanding,” IEEE trans.
2171
+ on pat. analy. mach. intel., vol. 42, no. 2, pp. 502–508, 2019.
2172
+ [61] M. A. Tempero, M. P. Malafa, M. Al-Hawary, S. W. Behrman,
2173
+ A. B. Benson, D. B. Cardin, E. G. Chiorean, V. Chung, B. Czito,
2174
+ M. Del Chiaro et al., “Pancreatic adenocarcinoma, version 2.2021,
2175
+ nccn clinical practice guidelines in oncology,” Journal of the National
2176
+ Comprehensive Cancer Network, vol. 19, no. 4, pp. 439–457, 2021.
2177
+ [62] Y. Lu, Q. Yu, Y. Gao, Y. Zhou, G. Liu, Q. Dong, J. Ma, L. Ding,
2178
+ H. Yao, Z. Zhang et al., “Identification of metastatic lymph nodes in
2179
+ mr imaging with faster region-based convolutional neural networks,”
2180
+ Cancer research, vol. 78, no. 17, pp. 5135–5143, 2018.
2181
+
2tAzT4oBgHgl3EQffPw3/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
3NFKT4oBgHgl3EQfQi0f/content/2301.11767v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b516514900454157846afb7dc08b67e0ae49464ba90e654638733b3bc12e5cb
3
+ size 463168
3NFKT4oBgHgl3EQfQi0f/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:723ed7693429a108817b2f562301a6ad62723ac4cef7a2ecadf9da0e47d37dde
3
+ size 2752557
3NFKT4oBgHgl3EQfQi0f/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b2b93e8a8b9dd0ff30800f883df5af0b1b8951e2f4aa29c575582408f37f684c
3
+ size 109052
3tFST4oBgHgl3EQfZDim/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89045a6211476ed51665b648c486d9ac008198678b7beb62bda47f85e3af7ea9
3
+ size 5439533
4dFIT4oBgHgl3EQf6yuB/content/tmp_files/2301.11395v1.pdf.txt ADDED
@@ -0,0 +1,578 @@
1
+ Cubic Double Perovskites Host Noncoplanar Spin Textures
2
+ Joseph A. M. Paddison,1, ∗ Hao Zhang,2 Jiaqiang Yan,1 Matthew J. Cliffe,3 Seung-Hwan Do,1 Shang Gao,1, 4
3
+ Matthew B. Stone,4 David Dahlbom,2 Kipton Barros,5 Cristian D. Batista,6 and Andrew D. Christianson1, †
4
+ 1Materials Science and Technology Division, Oak Ridge National Laboratory, Oak Ridge, TN 37831, USA
5
+ 2Department of Physics and Astronomy, University of Tennessee, Knoxville, Tennessee 37996, USA
6
+ 3School of Chemistry, University of Nottingham, Nottingham NG7 2RD, UK
7
+ 4Neutron Scattering Division, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37831, USA
8
+ 5Theoretical Division, Los Alamos National Laboratory, Los Alamos, New Mexico 87545, USA
9
+ 6Department of Physics and Astronomy, The University of Tennessee, Knoxville, Tennessee 37996, USA
10
+ Magnetic materials with noncoplanar magnetic structures can show unusual physical properties driven by
11
+ nontrivial topology. Topologically-active states are often multi-q structures, which are challenging to stabilize
12
+ in models and to identify in materials. Here, we use inelastic neutron-scattering experiments to show that the
13
+ insulating double perovskites Ba2YRuO6 and Ba2LuRuO6 host a noncoplanar 3-q structure on the face-centered
14
+ cubic lattice. Quantitative analysis of our neutron-scattering data reveals that these 3-q states are stabilized by
15
+ biquadratic interactions. Our study identifies double perovskites as a highly promising class of materials to
16
+ realize topological magnetism, elucidates the stabilization mechanism of the 3-q state in these materials, and
17
+ establishes neutron spectroscopy on powder samples as a valuable technique to distinguish multi-q from single-q
18
+ states, facilitating the discovery of topologically-nontrivial magnetic materials.
19
+ Most magnetic materials order with simple magnetic struc-
20
+ tures in which spins are collinear or coplanar.
21
+ Noncopla-
22
+ nar magnetic structures are relatively rare, but are of great
23
+ current interest, because they can exhibit topological charac-
24
+ ter and exotic physical properties [1, 2]. For example, the
25
+ finite scalar spin chirality of noncoplanar spin textures can
26
+ generate a topological magneto-optical effect [3] and anoma-
27
+ lous quantum Hall effect [4, 5], even in the absence of spin-
28
+ orbit coupling. Topologically-nontrivial spin textures are typ-
29
+ ically multi-q structures, which superpose magnetic modula-
30
+ tions with symmetry-related wavevectors q [2]. Multi-q spin
31
+ textures with long-wavelength modulations, such as skyrmion
32
+ and hedgehog crystals, are well-studied as hosts of topology-
33
+ driven phenomena [6–8]. In this context, multi-q antiferro-
34
+ magnets are increasingly important [9], because they offer
35
+ higher densities of topological objects with the potential to
36
+ generate stronger physical responses [10].
37
+ To probe the relationships between spin structure, interac-
38
+ tions, topology, and physical response, it is crucial to identify
39
+ real materials that host noncoplanar spin textures. This has
40
+ proved a challenging task, for three main reasons. First, it
41
+ is necessary to identify noncoplanar spin textures that are ro-
42
+ bust to subleading effects such as magnetic anisotropies, spin-
43
+ lattice coupling [11, 12], fluctuations [13–16], and anisotropic
44
+ interactions [17], which usually favor collinear states. Sec-
45
+ ond, most noncoplanar states are found in metals, such as
46
+ USb [18, 19] and γ-Mn alloys [20–25], and are often stable
47
+ only under an applied magnetic field [6, 26].
48
+ On the one
49
+ hand, itinerant electrons can support the generation of phys-
50
+ ical responses; on the other hand, modeling the magnetic in-
51
+ teractions of metals presents fundamental challenges [27–32],
52
+ such that insulators are often more suitable as model materi-
53
+ als. Third, powder neutron-diffraction measurements play a
54
+ central role in solving magnetic structures, but suffer from a
55
+ “multi-q problem”: Such measurements are generally unable
56
+ to distinguish 1-q from multi-q structures [33]. Therefore,
57
+ multi-q spin textures are challenging to stabilize in models,
58
+ and to identify in real materials.
59
+ Here, we identify the Mott-insulating double perovskites
60
+ Ba2YRuO6 and Ba2LuRuO6 [34–37] as prototypical exam-
61
+ ples of noncoplanar 3-q magnetism on the face-centered cu-
62
+ bic (FCC) lattice in zero magnetic field. We obtain evidence
63
+ for 3-q magnetism from a spin-wave analysis of neutron spec-
64
+ troscopy data. By optimizing the magnetic structure and inter-
65
+ actions simultaneously against our data, we show that the 3-
66
+ q structure is stabilized by biquadratic interactions within an
67
+ antiferromagnetic Heisenberg-Kitaev model. Our study ex-
68
+ perimentally establishes that noncoplanar multi-q states are
69
+ stabilized in frustrated FCC antiferromagnets, identifies cubic
70
+ double perovskites as model materials to realize this behavior,
71
+ and identifies guiding principles to facilitate design of materi-
72
+ als with noncoplanar states.
73
+ Our study is motivated by theoretical results for the
74
+ FCC antiferromagnet [13, 38–40].
75
+ The nearest-neighbor
76
+ Heisenberg-Kitaev spin Hamiltonian on the FCC lattice can
77
+ be written as
78
+ H = J ∑⟨i,j⟩ Si · Sj + K ∑⟨i,j⟩γ Si^γ Sj^γ ,                      (1)
86
+ where Si is a Ru5+ spin with quantum number S = 3/2, J
87
+ and K denote the Heisenberg and Kitaev interactions, respec-
88
+ tively, and γ ∈ {x,y,z} is perpendicular to the cubic plane con-
89
+ taining the bond between neighbors ⟨i, j⟩. For antiferromag-
90
+ netic J > 0 only, the model is frustrated, and orderings with
91
+ q ∈ [1,q,0] are degenerate [13, 39, 40]. The degenerate mani-
92
+ fold includes q = [1,0,0] (“Type I”) ordering, which is favored
93
+ by fluctuations [13, 14, 41] and is observed in Ba2YRuO6
94
+ and Ba2LuRuO6 [34]. Henceforth, we therefore restrict our
95
+ discussion to q = [1,0,0] ordering. For a collinear structure,
96
+ spins may be either parallel or perpendicular to q; the former
97
+ is favored by K < 0 and the latter by K > 0 [38–40].
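As a concrete reading of Eq. (1), the short sketch below evaluates the classical energy of a single nearest-neighbour FCC bond. It is an illustrative aid rather than code from this work; the J, K values and the spin configuration are placeholder assumptions.

# Illustrative sketch of the classical Heisenberg-Kitaev bond energy of Eq. (1).
import numpy as np

def bond_energy(S_i, S_j, bond, J=1.0, K=0.1):
    # gamma is the cubic axis perpendicular to the plane containing the bond,
    # i.e. the component of the FCC nearest-neighbour bond vector that vanishes.
    gamma = int(np.argmin(np.abs(bond)))
    return J * np.dot(S_i, S_j) + K * S_i[gamma] * S_j[gamma]

S_i = np.array([0.0, 0.0, 1.5])                            # S = 3/2 spin along +z
S_j = -S_i                                                 # antiparallel neighbour
print(bond_energy(S_i, S_j, np.array([0.5, 0.5, 0.0])))    # bond in the xy plane, so gamma = z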
98
+ Figure 1(a) shows the collinear (1-q) and noncollinear
99
+ arXiv:2301.11395v1 [cond-mat.str-el] 26 Jan 2023
100
+
101
+ 2
102
+ [Figure 1 graphics: (a) spin-structure diagrams labelled 1-q tetragonal, 2-q tetragonal, 3-q cubic
+ and 3-q cubic, 2-q tetragonal, 1-q orthorhombic, with labels "K ≥ 0, mX5+" and "K ≤ 0, mX3+";
+ (b) scattering panels; axis tick labels not recoverable from the extraction.]
153
+ Figure 1.
154
+ (a) Symmetry-allowed magnetic structures with propagation vector q = [1,0,0] on the FCC lattice for Ba2MRuO6 (space group
155
+ Fm¯3m; a = 8.29 and 8.24 Å for M = Y and Lu, respectively). The 1-q, 2-q, and 3-q structures are shown for the mX3+ irrep (left) and the
+ mX5+ irrep (right). Spins along different directions are colored differently; note that 1-q, 2-q, and 3-q structures have [100], ⟨110⟩, and ⟨111⟩
159
+ spin directions, respectively. (b) Elastic scattering data (−1.3 ≤ E ≤ 1.3 meV) measured at T = 5 K with Ei = 11.8 meV for Ba2YRuO6 and
160
+ Ba2LuRuO6 (black circles), Rietveld refinements (red lines), and data – fit (blue lines). Tick marks show (top to bottom): nuclear, impurity
161
+ M2O3, and magnetic phases. The mX3+ irrep (left) does not reproduce
+ our data, whereas the mX5+ irrep (right) agrees well with our data.
164
+ (multi-q) structures associated with Type I antiferromag-
165
+ netism. A remarkable property of the FCC lattice is that 1-q,
166
+ 2-q, and 3-q structures are energetically degenerate for all bi-
167
+ linear interactions for which Type I ordering is stable [39, 40].
168
+ Moreover, uniaxial anisotropy (∼Sz²) and antisymmetric exchange
+ terms are forbidden by Fm¯3m symmetries, and quartic anisotropy
+ (∼Sx⁴ + Sy⁴ + Sz⁴) is forbidden for S = 3/2 operators
175
+ in a cubic environment. Consequently, interactions that would
176
+ usually favor collinear magnetic structures are inactive in the
177
+ S = 3/2 FCC antiferromagnet. This remarkable property po-
178
+ tentially allows noncollinear structures to appear.
179
+ To identify candidate systems for 3-q spin textures among
180
+ the diverse magnetic ground states of double perovskites [42–
181
+ 50], we consider two criteria: Type I antiferromagnetic or-
182
+ dering, and strictly cubic symmetry below the magnetic or-
183
+ dering temperature, TN. The second criterion is key because
184
+ 3-q structures have cubic symmetry, while 1-q and 2-q struc-
185
+ tures have tetragonal or orthorhombic symmetry that could
186
+ drive a crystallographic distortion via spin-lattice coupling
187
+ [Figure 1(a)].
188
+ We investigate Ba2YRuO6 and Ba2LuRuO6
189
+ because they are chemically well-ordered and show no evi-
190
+ dence for low-temperature deviations from cubic symmetry
191
+ [34, 36]. Moreover, recent first-principles calculations predict
192
+ that their magnetic structures might not be collinear [51], in
193
+ apparent contradiction with interpretations of previous exper-
194
+ iments [34].
195
+ We prepared ∼8 g polycrystalline samples of Ba2YRuO6
196
+ and Ba2LuRuO6 by solid-state reaction [52].
197
+ Rietveld re-
198
+ finement revealed stoichiometric samples with minor Lu2O3
199
+ (1.94 wt.%) or Y2O3 (0.65 wt.%) impurities. The magnetic
200
+ ordering temperature TN ≈ 37 K is the same for both sam-
201
+ ples, and is suppressed compared to the Weiss temperature
202
+ θ ∼ −500 K, indicating strong magnetic frustration [36]. We
203
+ performed inelastic neutron-scattering measurements on the
204
+ SEQUOIA instrument at ORNL [53] using incident neutron
205
+ energies Ei = 62 and 11.8 meV, yielding elastic energy reso-
206
+ lutions δ ins ≈ 1.68 and 0.27 meV, respectively.
207
+ Figure 1(b) shows magnetic Rietveld refinements to our
208
+ elastic neutron-scattering data, measured with Ei = 11.8 meV
209
+ at T ≈ 5 K. Applying the q = [1,0,0] propagation vector to
210
+ Fm¯3m crystal symmetry generates two magnetic irreducible
211
+ representations (irreps), notated mX3+ and mX5+ [54, 55].
214
+ These irreps can be distinguished by their magnetic Bragg
215
+ profiles. The mX5+ irrep agrees well with our elastic-scattering
217
+ data for both materials; we obtain ordered magnetic moment
218
+ lengths of 2.56(2) and 2.43(2) µB per Ru for Ba2YRuO6 and
219
+ Ba2LuRuO6, respectively, from Rietveld refinement. Since
220
+ the magnetic form factor for Ru5+ is not known, we tested
221
+ several 4d magnetic form factors [56]; while this choice does
222
+ not qualitatively affect our results, the form factor for Zr+
223
+ (isoelectronic with Ru5+) yields optimal agreement with our
224
+ data and is used throughout. In contrast to the mX5+ irrep, the
+ mX3+ irrep strongly disagrees with our data, as it yields zero
228
+ intensity for the strong (100) magnetic Bragg peak. This can
229
+ be understood intuitively for a collinear 1-q structure, because
230
+ neutrons are only sensitive to spin components perpendicular
231
+ to the scattering wavevector, and the mX3+ irrep has S ∥ q
232
+ while the mX5+ irrep has S ⊥ q [Figure 1(a)]. A previous elas-
233
+
234
+ 3
235
+ Figure 2. Broadband inelastic neutron-scattering data (Ei = 62 meV)
236
+ measured at T = 5 K for Ba2YRuO6 (upper panels) and Ba2LuRuO6
237
+ (lower panels), showing (a) intensity as a color plot, and (b) energy
238
+ dependence integrated over 4.0 ≤ Q ≤ 4.5 Å−1, where experimental
239
+ data are shown as black circles, and Gaussian fits to the ∼14 meV
240
+ phonon band as red lines.
241
+ tic neutron-scattering study of Ba2YRuO6 and Ba2LuRuO6
242
+ considered only collinear 1-q structures [34], but could not
243
+ rule out multi-q structures, due to the multi-q problem.
244
+ To overcome the multi-q problem, we consider the en-
245
+ ergy dependence of our neutron-scattering data [57].
246
+ Fig-
247
+ ure 2(a) shows our inelastic data measured with Ei = 62 meV
248
+ at T ≈ 5 K. A structured inelastic signal appears at T < TN
249
+ for small scattering wavevectors, Q ≲ 2 Å−1, which we iden-
250
+ tify as magnon scattering.
251
+ The top of the magnetic band
252
+ overlaps with an intense phonon signal for Q ≳ 2 Å−1. Fig-
253
+ ure 2(b) shows the scattering intensity integrated over 4.0 ≤
254
+ Q ≤ 4.5 Å−1, from which we extract the average energy Eph
255
+ and width σph of this phonon band via Gaussian fits for each
256
+ material. The energy overlap of magnon and phonon modes
257
+ suggests that spin-lattice coupling may be significant, which
258
+ we consider further below.
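A minimal sketch of this kind of Gaussian extraction of the phonon-band centre and width is given below. It is not the authors' analysis code; the synthetic intensities merely stand in for the Q-integrated cuts of Figure 2(b).

# Illustrative Gaussian fit to a Q-integrated intensity cut I(E).
import numpy as np
from scipy.optimize import curve_fit

def gaussian(E, amp, E_ph, sigma_ph, bkg):
    return amp * np.exp(-(E - E_ph) ** 2 / (2.0 * sigma_ph ** 2)) + bkg

E = np.linspace(5.0, 25.0, 41)                                  # energy transfer (meV)
I = gaussian(E, 1.0, 14.0, 2.0, 0.1)                            # synthetic stand-in data
I += 0.02 * np.random.default_rng(0).normal(size=E.size)        # small noise for realism
popt, _ = curve_fit(gaussian, E, I, p0=[1.0, 13.0, 3.0, 0.0])
print("E_ph ~ %.2f meV, sigma_ph ~ %.2f meV" % (popt[1], abs(popt[2])))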
259
+ Our starting point for modeling the magnetic scattering is
260
+ the Heisenberg-Kitaev model, Eq. (1). For all models, we re-
261
+ quire J > 0 and K > 0 to stabilize mX5+ ordering. We con-
263
+ sider three additional interactions in turn. First, the symmet-
264
+ ric off-diagonal interaction HΓ = Γ ∑⟨i,j⟩γ (Si^α Sj^β + Si^β Sj^α) is
273
+ the only additional bilinear nearest-neighbor interaction al-
274
+ lowed by symmetry.
275
+ Second, the Heisenberg next-nearest
276
+ neighbor interaction H2 = J2 ∑⟨⟨i, j⟩⟩ Si · Sj has been invoked
277
+ for Ba2YRuO6 [37]; we require J2 ≤ 0 to stabilize Type
278
+ I ordering.
279
+ Third, the nearest-neighbor biquadratic cou-
280
+ pling Hbq = Jbq ∑⟨i, j⟩(Si · Sj)2 has been invoked in density-
281
+ functional-theory calculations for 4d double perovskites due
282
+ to their increased electron hopping relative to 3d analogs [51].
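For readability, the three additional interactions just introduced can be collected in display form (a transcription of the in-text definitions above, using the paper's notation):

\begin{align}
\mathcal{H}_{\Gamma} &= \Gamma \sum_{\langle i,j\rangle_{\gamma}} \left( S_i^{\alpha} S_j^{\beta} + S_i^{\beta} S_j^{\alpha} \right), \\
\mathcal{H}_{2} &= J_{2} \sum_{\langle\langle i,j\rangle\rangle} \mathbf{S}_i \cdot \mathbf{S}_j, \\
\mathcal{H}_{\mathrm{bq}} &= J_{\mathrm{bq}} \sum_{\langle i,j\rangle} \left( \mathbf{S}_i \cdot \mathbf{S}_j \right)^{2}.
\end{align}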
283
+ For Jbq = 0, the classical energy of 1-q, 2-q, and 3-q struc-
284
+ tures is equal for all K, Γ, and J2 that stabilize Type I or-
285
+ dering. Nonzero Jbq removes this degeneracy, and stabilizes
286
+ [Figure 3 graphics: (a) Jbq–K phase diagram with regions labelled 3-q mX5+, 1-q mX5+,
+ 3-q mX3+, and 1-q mX3+; (b) bar charts of Rwp (%) for the 1-q, 2-q, and 3-q fits;
+ numeric axis tick labels not recoverable from the extraction.]
332
+ Figure 3. (a) Schematic phase diagram showing the magnetic ground
333
+ states of the J-K-Jbq model. (b) Goodness-of-fit metric Rwp for can-
334
+ didate magnetic structures and interaction models of Ba2YRuO6 (up-
335
+ per graph) and Ba2LuRuO6 (lower graph). The graphs show Rwp for
336
+ refinements of the Heisenberg-Kitaev (J-K) model including a third
337
+ refined parameter Γ (red bars), J2 (blue bars), or Jbq (green bars);
338
+ note that the 2-q structure is stable only for Jbq = 0.
339
+ Material        J (K)       K (K)      Jbq (K)    A (meV)
+ Ba2YRuO6        21.85(3)    0.39(1)    1.32(2)    0.97(3)
+ Ba2LuRuO6       22.27(4)    0.36(2)    1.17(3)    2.25(5)
344
+ Table I. Refined values of magnetic interaction parameters for the J-
345
+ K-Jbq model and 3-q structure. Uncertainties indicate 1σ statistical
346
+ confidence intervals.
347
+ 1-q ordering for Jbq < 0 and 3-q ordering for Jbq > 0 [Fig-
348
+ ure 3(a)]. Importantly, since single-ion anisotropies are for-
349
+ bidden for S = 3/2 in a cubic environment, biquadratic ex-
350
+ change is the only physically-plausible mechanism that can
351
+ remove the degeneracy of 1-q and 3-q structures.
352
+ We performed extensive fits to our inelastic neutron-
353
+ scattering data to optimize the magnetic structure and inter-
354
+ actions simultaneously. For each structure associated with the
355
+ mX5+ irrep (1-q, 2-q, or 3-q), we optimized three spin Hamil-
357
+ tonian parameters (J, K, and either Γ, J2, or Jbq) against the
358
+ broadband inelastic data shown in Figure 4(a) and the energy
359
+ dependence near the (100) magnetic Bragg position shown
360
+ in Figure 4(b). The powder-averaged magnon spectrum was
361
+ calculated within a renormalized linear spin-wave theory [58]
362
+ using the SpinW program [59]. The renormalization factor,
363
+ which takes into account higher-order corrections in the 1/S
364
+ expansion, is strictly necessary to extract a correct value of
365
+ Jbq, since the unrenormalized spin-wave theory would lead to
366
+ a value of Jbq that is 2.25 times smaller than the correct value
367
+ [60]. The parameter values were optimized to minimize the
368
+ sum of squared residuals using nonlinear least-squares refine-
369
+ ment [52]. We calculated the energy-dependent broadening of
370
+ the magnon spectrum as δ(E) = δins(E) + A exp[−(E−Eph)²/(2δph²)],
372
+ where δ(E) is the overall Gaussian energy width, δins(E) is
373
+ the instrumental resolution, and A is a refined parameter that
374
+ phenomenologically accounts for magnon broadening due to
375
+ coupling with phonons at E ∼ Eph.
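A minimal numerical sketch of this phenomenological broadening is given below. It is not the authors' fitting code, it treats the instrumental width as a constant for simplicity, and apart from δins ≈ 1.68 meV and Eph ≈ 14 meV quoted earlier the parameter values are placeholders.

# Illustrative sketch of the energy-dependent Gaussian width used in the fits.
import numpy as np

def total_width(E, delta_ins, A, E_ph, delta_ph):
    return delta_ins + A * np.exp(-(E - E_ph) ** 2 / (2.0 * delta_ph ** 2))

E = np.linspace(0.0, 25.0, 6)                                       # energies in meV
print(total_width(E, delta_ins=1.68, A=1.0, E_ph=14.0, delta_ph=2.0))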
376
+ Figure 4(a) compares our broadband inelastic data (Ei =
377
+ 62 meV) with the best fit for each of the 1-q, 2-q, and 3-q
378
+
379
+ 4
380
+ Figure 4. (a) Broadband inelastic neutron-scattering data (Ei = 62 meV) and optimal spin-wave fits for different magnetic structures, showing
381
+ (left to right) experimental data, 1-q fit, 2-q fit, and 3-q fit. (b) Low-energy inelastic neutron-scattering data (Ei = 11.8 meV) and 3-q model
382
+ calculations, showing (left to right) a cut at Q = 0.7450±0.0175 Å−1 comparing experimental data (black circles) and spin-wave fit (red lines),
383
+ experimental data as a Q-E slice, and spin-wave calculation.
384
+ structures. The data show two V-shaped features centered at
385
+ ≈ 0.85 and ≈ 1.70 Å−1, with a sharp cutoff of magnetic signal
386
+ for energies above ∼14 meV. For both materials, these char-
387
+ acteristics are best reproduced by the 3-q structure, while the
388
+ 1-q structure disagrees with our experimental data. These ob-
389
+ servations are confirmed by the goodness-of-fit metric Rwp,
390
+ shown in Figure 3(b). For both materials and for every in-
391
+ teraction model we considered, the 3-q structure yields better
392
+ agreement with our data than the 1-q or 2-q structures. No-
393
+ tably, the goodness-of-fit is more sensitive to the structure than
394
+ the precise magnetic interactions; indeed, the main differences
395
+ between 1-q and 3-q spectra are apparent for Heisenberg ex-
396
+ change only [52]. The global best fit is for the 3-q structure
397
+ and J, K, and Jbq interactions with the refined values given in
398
+ Table I. The refined values of A indicate significant magnon
399
+ broadening, which is larger for Ba2LuRuO6 and is likely due
400
+ to magnon-phonon coupling. Importantly, for both materi-
401
+ als, the biquadratic term is significant, with Jbq/J ∼ 0.06.
402
+ Hence, our key results are that only the 3-q spin texture agrees
403
+ well with our neutron data, and this state is stabilized by bi-
404
+ quadratic interactions in Ba2YRuO6 and Ba2LuRuO6.
405
+ Our model provides insight into the mechanism of gap
406
+ opening [35]. Figure 4(b) compares our low-energy inelastic
407
+ data (Ei = 11.8 meV) with the 3-q magnon spectrum for the
408
+ optimal J-K-Jbq model [Table I]. This calculation reproduces
409
+ the observed ≈ 2.8 meV gap, unlike the J-K-J2 model that
410
+ yields the next-best Rwp [52]. Since single-ion anisotropies
411
+ are forbidden here, the mechanism of gap opening is subtle.
412
+ If K = 0, there is no gap, because the energy of the Heisenberg
413
+ and biquadratic terms is unchanged by global spin rotations.
414
+ For K > 0, whether a gap opens depends on both structure
415
+ and interactions. If the structure is 1-q with Jbq < 0, the clas-
416
+ sical energy is unchanged by global spin rotations in the plane
417
+ perpendicular to q. In this case, there is no gap at the linear
418
+ spin-wave level; a gap is generated only by magnon interac-
419
+ tions in the quantum (S = 1/2) limit [61]. By contrast, if the
420
+ structure is 3-q with Jbq > 0, a gap is present at the linear spin-
421
+ wave level, because Jbq > 0 and K > 0 together favor ⟨111⟩
422
+ spin alignment. Since Ba2YRuO6 and Ba2LuRuO6 are not in
423
+ the quantum limit, the experimental observation of a gap sup-
424
+ ports the presence of biquadratic and Kitaev interactions in a
425
+ 3-q structure.
426
+ We have shown that the magnetic ground states of
427
+ Ba2YRuO6 and Ba2LuRuO6 are noncoplanar 3-q structures
428
+ stabilized by biquadratic interactions. Macroscopic topolog-
429
+ ical physical responses may be generated by synthesizing thin
430
+ films of these materials with [111] strain [62]. Our exper-
431
+ imental results strikingly confirm recent first-principles pre-
432
+ dictions [51]. The positive sign of Jbq suggests that the ef-
433
+ fect of inter-site electron hopping outweighs spin-lattice cou-
434
+ pling, since the latter would give a negative contribution to
435
+ Jbq [11, 12]. Crucially, we quantify the magnetic interactions
436
+ that stabilize the noncoplanar state, in contrast to other pro-
437
+ posed 3-q structures in NiS2 [63–65], MnTe2 [66], and UO2
438
+ [67–70], where the relevant interactions are not yet well un-
439
+ derstood. Our work provides several guiding principles to fa-
440
+ cilitate the identification of multi-q spin textures. First, the
441
+ near-degeneracy of 1-q and multi-q structures on the FCC lat-
442
+ tice makes double perovskites enticing systems. In candidate
443
+ materials, the crystal symmetry should be higher than a 1-q
444
+ model would imply. Second, magnets that are not deep in-
445
+ side the Mott-insulating regime are expected to have larger
446
+ Jbq and, consequently, more robust 3-q orderings. This cri-
447
+ terion hints that cubic Ba2YOsO6 [71, 72] may also host a
448
+ 3-q state, due to its extended Os 5d orbitals, potentially offer-
449
+ ing a route to investigate the effect of increased electron hop-
450
+ ping. For small Jbq, we anticipate a thermally-induced tran-
451
+ sition from 3-q to 1-q ordering, since thermal fluctuations fa-
452
+
453
+ 5
454
+ vor collinear states. Third, quartic single-ion anisotropy may
455
+ play a role in FCC magnets with S > 3/2; in particular, easy-
456
+ ⟨111⟩ axis anisotropy should favor 3-q ordering. Finally, our
457
+ key methodological insight is that refining the magnetic struc-
458
+ ture and interactions simultaneously enables 1-q and multi-q
459
+ structures to be distinguished on the FCC lattice, even when
460
+ single-crystal samples are not available.
461
+ This work was supported by the U.S. Department of En-
462
+ ergy, Office of Science, Basic Energy Sciences, Materials Sci-
463
+ ences and Engineering Division. This research used resources
464
+ at the Spallation Neutron Source, a DOE Office of Science
465
+ User Facility operated by the Oak Ridge National Laboratory.
466
467
468
+ [1] Y. Tokura, N. Kanazawa, Chem. Rev. 121, 2857 (2021).
469
+ [2] R. Shindou, N. Nagaosa, Phys. Rev. Lett. 87, 116801 (2001).
470
+ [3] W. Feng, et al., Nature Communications 11, 118 (2020).
471
+ [4] C. Sürgers, G. Fischer, P. Winkel, H. v. Löhneysen, Nature
472
+ Communications 5, 3400 (2014).
473
+ [5] J. Zhou, et al., Phys. Rev. Lett. 116, 256601 (2016).
474
+ [6] T. Kurumaji, et al., Science 365, 914 (2019).
475
+ [7] M. Hirschberger, et al., Nat. Commun. 10, 5831 (2019).
476
+ [8] M. Hirschberger, et al., Phys. Rev. Lett. 125, 076602 (2020).
477
+ [9] S. Gao, et al., Nature 586, 37 (2020).
478
+ [10] O. Gomonay, V. Baltz, A. Brataas, Y. Tserkovnyak, Nat. Phys.
479
+ 14, 213 (2018).
480
+ [11] K. Penc, N. Shannon, H. Shiba, Phys. Rev. Lett. 93, 197203
481
+ (2004).
482
+ [12] F. Wang, A. Vishwanath, Phys. Rev. Lett. 100, 077201 (2008).
483
+ [13] M. V. Gvozdikova, M. E. Zhitomirsky, J. Exp. Theor. Phys. Lett.
484
+ 81, 236 (2005).
485
+ [14] R. Schick, T. Ziman, M. E. Zhitomirsky, Phys. Rev. B 102,
486
+ 220405 (2020).
487
+ [15] R. R. P. Singh, W. Zheng, J. Oitmaa, O. P. Sushkov, C. J. Hamer,
488
+ Phys. Rev. Lett. 91, 017201 (2003).
489
+ [16] P. A. McClarty, P. Stasiak, M. J. P. Gingras, Phys. Rev. B 89,
490
+ 024425 (2014).
491
+ [17] P. A. Maksimov, Z. Zhu, S. R. White, A. L. Chernyshev, Phys.
492
+ Rev. X 9, 021017 (2019).
493
+ [18] J. Jensen, P. Bak, Phys. Rev. B 23, 6180 (1981).
494
+ [19] B. Hälg, A. Furrer, Phys. Rev. B 34, 6258 (1986).
495
+ [20] K. Hirai, T. Jo, J. Phys. Soc. Jpn 54, 3567 (1985).
496
+ [21] S. Kawarazaki, et al., Phys. Rev. Lett. 61, 471 (1988).
497
+ [22] S. Kawarazaki, Y. Sasaki, K. Yasuda, T. Mizusaki, A. Hirai, J.
498
+ Phys.: Condens. Matter 2, 5747 (1990).
499
+ [23] M. W. Long, O. Moze, J. Phys.: Condens. Matter 2, 6013
500
+ (1990).
501
+ [24] R. S. Fishman, et al., Phys. Rev. B 61, 12159 (2000).
502
+ [25] J.-P. Hanke, F. Freimuth, S. Blügel, Y. Mokrousov, Sci. Rep. 7,
503
+ 41078 (2017).
504
+ [26] N. D. Khanh, et al., Nat. Nanotechnol. 15, 444 (2020).
505
+ [27] D. F. Agterberg, S. Yunoki, Phys. Rev. B 62, 13816 (2000).
506
+ [28] S. Hayami, Y. Motome, Phys. Rev. B 90, 060402 (2014).
507
+ [29] T. Jo, J. Phys. F: Met. Phys. 13, L211 (1983).
508
+ [30] Y. Matsuura, T. Jo, J. Phys. Soc. Jpn 78, 124709 (2009).
509
+ [31] S. Hayami, Y. Motome, Phys. Rev. B 103, 054422 (2021).
510
+ [32] S. Hayami, Y. Motome, J. Phys.: Condens. Matter 33, 443001
511
+ (2021).
512
+ [33] J. Kouvel, J. Kasper, J. Phys. Chem. Solids 24, 529 (1963).
513
+ [34] P. Battle, C. Jones, J. Solid State Chem. 78, 108 (1989).
514
+ [35] J. P. Carlo, et al., Phys. Rev. B 88, 024418 (2013).
515
+ [36] T. Aharen, et al., Phys. Rev. B 80, 134423 (2009).
516
+ [37] G. J. Nilsen, C. M. Thompson, G. Ehlers, C. A. Marjerrison,
517
+ J. E. Greedan, Phys. Rev. B 91, 054415 (2015).
518
+ [38] A.
519
+ M.
520
+ Cook,
521
+ S.
522
+ Matern,
523
+ C.
524
+ Hickey,
525
+ A.
526
+ A.
527
+ Aczel,
528
+ A. Paramekanti, Phys. Rev. B 92, 020417 (2015).
529
+ [39] P. Balla, Y. Iqbal, K. Penc, Phys. Rev. Research 2, 043278
530
+ (2020).
531
+ [40] S.-S. Diop, G. Jackeli, L. Savary, Phys. Rev. B 105, 144431
532
+ (2022).
533
+ [41] R. Schick, et al., arXiv:2206.12102 (2022).
534
+ [42] S. Gangopadhyay, W. E. Pickett, Phys. Rev. B 93, 155126
535
+ (2016).
536
+ [43] A. Paramekanti, et al., Phys. Rev. B 97, 235119 (2018).
537
+ [44] J.-W. G. Bos, J. P. Attfield, Phys. Rev. B 70, 174434 (2004).
538
+ [45] A. E. Taylor, et al., Phys. Rev. B 93, 220408 (2016).
539
+ [46] A. E. Taylor, et al., Phys. Rev. B 98, 214422 (2018).
540
+ [47] S. Gao, et al., Phys. Rev. B 101, 220412 (2020).
541
+ [48] A. Paramekanti, D. D. Maharaj, B. D. Gaulin, Phys. Rev. B 101,
542
+ 054439 (2020).
543
+ [49] D. D. Maharaj, et al., Phys. Rev. Lett. 124, 087206 (2020).
544
+ [50] N. Iwahara, V. Vieru, L. F. Chibotaru, Phys. Rev. B 98, 075138
545
+ (2018).
546
+ [51] Y.-W. Fang, R. Yang, H. Chen, J. Phys.: Condens. Matter 31,
547
+ 445803 (2019).
548
+ [52] See supplemental material for synthesis details, experimental
549
+ and computational methods, and two supplemental figures.
550
+ [53] G. E. Granroth, et al., J. Phys.: Conf. Ser. 251, 012058 (2010).
551
+ [54] A. P. Cracknell, B. L. Davies, S. C. Miller, W. F. Love, Kro-
552
+ necker Product Tables. General Introduction and Tables of Ir-
553
+ reducible Representations of Space Groups, vol. 1 (Plenum,
554
+ 1979).
555
+ [55] A. Wills, J. Phys. IV France 11, 133 (2001).
556
+ [56] P. J. Brown, International Tables for Crystallography (Kluwer
557
+ Academic Publishers, Dordrecht, 2004), vol. C, chap. Magnetic
558
+ Form Factors, pp. 454–460.
559
+ [57] J. A. M. Paddison, et al., npj Quantum Materials 6, 99 (2021).
560
+ [58] J.-P. Ader, Phys. Rev. B 65, 014411 (2001).
561
+ [59] S. Toth, B. Lake, J. Phys.: Condens. Matter 27, 166002 (2015).
562
+ [60] D. Dahlbom, et al., Renormalized classical theory of quantum
563
+ magnets. In preparation.
564
+ [61] A. A. Aczel, et al., Phys. Rev. B 93, 214426 (2016).
565
+ [62] Z. Wang, P. Zhang, J. Shi, Phys. Rev. B 76, 094406 (2007).
566
+ [63] K. Kikuchi, T. Miyadai, T. Fukui, H. Itô, K. Takizawa, J. Phys.
567
+ Soc. Jpn 44, 410 (1978).
568
+ [64] K. Yosida, S. Inagaki, J. Phys. Soc. Jpn 50, 3268 (1981).
569
+ [65] T. Higo, S. Nakatsuji, J. Phys. Soc. Jpn 84, 053702 (2015).
570
+ [66] P. Burlet, et al., Phys. Rev. B 56, 14013 (1997).
571
+ [67] B. C. Frazer, G. Shirane, D. E. Cox, C. E. Olsen, Phys. Rev.
572
+ 140, A1448 (1965).
573
+ [68] J. Faber, G. H. Lander, Phys. Rev. B 14, 1151 (1976).
574
+ [69] R. Caciuffo, et al., Phys. Rev. B 59, 13892 (1999).
575
+ [70] S. L. Dudarev, et al., Phys. Rev. Materials 3, 083802 (2019).
576
+ [71] E. Kermarrec, et al., Phys. Rev. B 91, 075133 (2015).
577
+ [72] D. D. Maharaj, et al., Phys. Rev. B 98, 104434 (2018).
578
+
4dFIT4oBgHgl3EQf6yuB/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
59E3T4oBgHgl3EQfpgot/content/2301.04642v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:26fe2fadff4d89db21573fedbdff14c20696a119675ef77374715d2ffa6f3825
3
+ size 1084164
59E3T4oBgHgl3EQfpgot/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dac899f2727cf246c5a0f62bd443c285a0e7d99d9cde9c05fbb409e109494723
3
+ size 2097197
5NE1T4oBgHgl3EQfBAIU/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2a4d684e46c761f4e6e042a065e4f90ae35a06650473be405d01f0c95a33d9a
3
+ size 5111853
69E4T4oBgHgl3EQf2A2F/content/2301.05295v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c69a80058bd3f736017f0e47ca9d1f484a6a2055dab3e86b0e4e9d1f5a6bd98a
3
+ size 372473
69E4T4oBgHgl3EQf2A2F/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8ea1980a24b89cd1f02246c6163fdfcac282dded681754018fbc80f2728ca67
3
+ size 1310765
69E4T4oBgHgl3EQf2A2F/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5d38a2f314ea27466571fb4ec6613366269d265b16baea4754a33a942c449fa
3
+ size 50392
79E3T4oBgHgl3EQfRwk1/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fb1ac57232576c2c8c1a8a3112d11cb5d825311f3f6b41710ef68cffa93510b
3
+ size 2883629
79E3T4oBgHgl3EQfRwk1/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6119c506a4d3d9ef02d3480cb50e9f9e9b7955db2314ddede1098f5743ec83e7
3
+ size 120922
7NE4T4oBgHgl3EQfCQsk/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f587aa5f3ac753975bed34452c6334aee6e6bd6b3dbdeb5a3e0fb63fbd2eb92
3
+ size 131055
7dE0T4oBgHgl3EQffQBI/content/2301.02401v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b70bb0495f5246f46cac5880b70c75b535d5db190a78b2162f53556adf9961d
3
+ size 2435792
7dE0T4oBgHgl3EQffQBI/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5fd5028d9992daf5ba590fd9b84a0dde2c8b3b2773e7efffd6d5f6d473d50bc5
3
+ size 3670061
7dE0T4oBgHgl3EQffQBI/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a30b8d5b6a09a5b1715853ee8097d8f942e0b8590b94639840c32d76e731ded
3
+ size 136400
7tAyT4oBgHgl3EQfQvZV/content/2301.00051v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:089559cc2484e8a0f0b7a8ae0c91efcf0380b6b23b570837fa96777f68e12afb
3
+ size 4937117
7tAyT4oBgHgl3EQfQvZV/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00e7628507023ff6bac558306c01c0171d33c8d747547350ca380291c05a165e
3
+ size 5898285
8dE1T4oBgHgl3EQfngSj/content/2301.03310v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a7634dfac412de2aa78296109e17da6ed608455fdad5e5a136ce462945d40179
3
+ size 1047816
8dE1T4oBgHgl3EQfngSj/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3f955148c53944f97fe820272b9aaa2f3e3eab6ce804a3f96e534b23c979f07
3
+ size 89305
8tAyT4oBgHgl3EQf3PkC/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6b98104db3e1afa6d1c22424cf60db2547958de38e4ba401c43ce3be0b60048
3
+ size 403508
9dE4T4oBgHgl3EQfDQsU/content/tmp_files/2301.04867v1.pdf.txt ADDED
@@ -0,0 +1,1524 @@
1
+ arXiv:2301.04867v1 [physics.chem-ph] 12 Jan 2023
2
+ Model for vibrationally enhanced tunneling of
3
+ proton transfer in hydrogen bond
4
+ A.E. Sitnitsky,
5
+ Kazan Institute of Biochemistry and Biophysics, FRC Kazan Scientific Center of
6
+ RAS, P.O.B. 30, 420111, Russian Federation. e-mail: [email protected]
7
+ Abstract
8
+ Theoretical analysis of the effect of an external vibration on proton transfer (PT) in
9
+ a hydrogen bond (HB) is carried out. It is based on the two-dimensional Schrödinger
10
+ equation with trigonometric double-well potential. Its solution obtained within the
11
+ framework of the standard adiabatic approximation is available. An analytic for-
12
+ mula is derived that provides the calculation of PT rate with the help of elements
13
+ implemented in Mathematica. We exemplify the general theory by calculating PT
14
+ rate constant for the intermolecular HB in the Zundel ion H5O+
15
+ 2 (oxonium hydrate).
16
+ This object enables one to explore a wide range of the HB lengths. Below some crit-
17
+ ical value of the frequency of the external vibration the calculated PT rate yields
18
+ extremely rich resonant behavior (multiple manifestations of bell-shaped peaks). It
19
+ takes place at symmetric coupling of the external vibration to the proton coordinate.
20
+ This phenomenon is absent for anti-symmetric and squeezed mode couplings.
21
+ Key words: Schr¨odinger equation, double-well potential, quantum tunneling,
22
+ spheroidal function, Zundel ion.
23
+ 1
24
+ Introduction
25
+ Proton transfer (PT) in hydrogen bonds (HB) is one of the main processes
26
+ in the reaction rate theory. It takes place in the most important biological
27
+ molecules such as proteins (participating in some enzymatic reactions) and
28
+ DNA (arguably participating in the occurrence of mutations). In particular
29
+ the phenomenon of vibrationally enhanced (or assisted or promoted) tunnel-
30
+ ing at PT (i.e., resonant acceleration of the process by a coupled oscillation
31
+ in some frequency range) [1], [2] is of great interest especially in regard of its
32
+ Email address: [email protected] ( A.E. Sitnitsky).
33
+ Preprint submitted to Chemical Physics Letters
34
+ 13 January 2023
35
+
36
+ possible role in a mechanism for enzymatic hydrogen transfer [3-6]. Within
37
+ the context of enzyme catalysis it is a specific case of the more general trend
38
+ named ”rate-promoting vibration” [6-11]. There are several cases in which a
39
+ vibration can be coupled to the proton coordinate in HB. First of all there
40
+ is the heavy atoms stretching mode which is an intrinsic degree of freedom
41
+ in HB. It is thoroughly studied theoretically since the pioneer articles [12],
42
+ [13], [1] dealing with vibrationally promoted PT in solids. Unfortunately it
43
+ is an internal vibration and its fixed frequency is not an experimentally con-
44
+ trollable parameter. Then there are external vibrations exerted on HB and
45
+ provided either by protein scaffold for HB in enzymes or by some means from
46
+ the researcher’s toolkit for HB in model compounds. One of the most effi-
47
+ cient ways for such purpose is the usage of the IR electromagnetic field of an
48
+ optical cavity. The phenomenon of resonant activation (or, in contrast, sup-
49
+ pression) of reaction rates is widely discussed for modifying chemical kinetics
50
+ by optical cavities (for recent articles in this field which is sometimes called
51
+ vibrational polariton chemistry see, e.g., [14-16] and refs. therein). The reso-
52
+ nance, i.e., maximal cavity induced enhancement of the reaction rate under
53
+ the vibrational resonance condition is produced in this case by mixing the
54
+ electromagnetic field with quantum states of molecular systems. The cavity is
55
+ equivalent to a harmonic oscillator of a given frequency coupled to the molec-
56
+ ular system. The Hamiltonian of the molecule degree of freedom coupled to
57
+ the field oscillator in the electric dipole approximation of light-matter interac-
58
+ tion has the same structure as those used for PT coupled to the heavy atoms
59
+ stretching mode in HB. In this regard constructing reliable theoretical models
60
+ of PT which take into account the possibility of varying the frequency of the
61
+ external vibration exerted on HB is a long-standing problem for the reaction
62
+ rate theory and seems to be of interest for perspectives of various application.
63
+ A proton in HB is known to be sufficiently light to exhibit full-fledged quan-
64
+ tum behavior leading to tunneling effect, energy levels splitting, etc (see, e.g.,
65
+ [12,13,17-28] and refs. therein). Physical models of PT based on simplified
66
+ Hamiltonians take their peculiar place in the enormous amount of literature on
67
+ HB including also ab initio calculations by the methods of quantum chemistry,
68
+ DFT, their combination with molecular dynamics simulations (considering nu-
69
+ clei as classical Newtonian particles), QM/MM, chemical physics approaches
70
+ within the framework of modern trends in TST, quantum-classical Liouville
71
+ dynamics, etc. In physical models of PT the reaction coordinate is singled out
72
+ and studied separately from the environment for which various approxima-
73
+ tions are assumed. The problem of PT rate estimate is inevitably reduced to
74
+ a one-dimensional path on the potential energy surface (PES) and as a result
75
+ to a one-dimensional cross-section of PES for HB which usually has the form
76
+ of a double-well potential (DWP). Quantum mechanical models of PT are
77
+ motivated by the necessity to take into account other (than the reaction co-
78
+ ordinate) internal dynamic modes, e.g., the heavy atoms stretching mode and
79
+ to account for vibrationally and/or thermally assisted tunneling. The modern
80
82
+ physical approach to taking into account dissipative effects at tunneling is
83
+ based on the Lindblad master equation (describing the dynamics of Marko-
84
+ vian open quantum systems) for the time evolution of the density matrix and
85
+ Caldeira-Leggett model of the thermal bath. For PT such scheme was initiated
86
+ in [12,13] and by now it has been thoroughly studied within the framework of
87
+ the general context for the reaction rate theory (see, e.g., [17] and refs. therein).
88
+ The problem of the rate-promoting vibration for PT is considered with the
89
+ help of this theory in [28]. The authors obtained the desired increase of the PT
90
+ rate at adding the vibrational mode. However they came to a conclusion that
91
+ the lower its frequency the stronger the enhancement of PT rate. Our aim is to
92
+ find out the conceptual possibility of resonant activation (bell-shaped peaks)
93
+ with frequency. In the present article we avoid the complications of the above
94
+ mentioned theory (ensuing from the necessity to deal with numerous evolution
95
+ equations for the density matrix elements) which seem to be unimportant for
96
+ our aims. For calculating PT rate we make use of the Weiner’s theory [18,19].
97
+ Our model of HB is maximally simple and constructed in an ad hoc manner
98
+ for studying the effect of PT resonant activation. It corresponds to HB in a
99
+ gas phase and does not touch upon the effects of environment taking place
100
+ in solution. It deals only with two salient degrees of freedom, i.e., the proton
101
+ coordinate and that of an oscillator (e.g., the heavy atoms stretching mode or
102
+ an external vibration) with symmetric coupling between them. We treat both
103
+ degrees of freedom quantum-mechanically by solving the corresponding two-
104
+ dimensional Schr¨odinger equation (SE). We make use of literature data for
105
+ the one-dimensional cross-section of PES from quantum-chemical calculations
106
+ and model it by a suitable phenomenological DWP. For the case of the heavy
107
+ atoms stretching mode we use literature data of IR-spectroscopy for HB to
108
+ determine its frequency and the strength of proton coordinate coupling to it.
109
+ The calculation of PT rate in HB requires the knowledge of the energy levels
110
+ which are the eigenvalues of the corresponding SE with DWP. PT is a typical
111
+ example of a quantum particle in DWP which is an omnipresent problem in
112
+ physics and chemistry [23,26,29-41]. Most DWPs used for the analysis of HB
113
+ and composed of polynomials, exponentials (e.g., the double Morse potential)
114
+ or their combinations are amenable only to numerical solutions (even in one-
115
+ dimensional case let alone its two-dimensional generalization) or approximate
116
+ analytic approaches like the quasi-classical (WKB) method. This restriction
117
+ was inevitable until 2010-s because of the lack of a convenient DWP for which
118
+ SE would have an exact analytic solution (see [29] and refs. therein). Since then
119
+ a number of exactly solvable DWPs suitable for chemical problems (taking
120
+ infinite values at the boundaries of the spatial variable interval) appeared. For
121
+ them analytic solutions of SE are feasible via the confluent Heun’s function
122
+ [23], [31-38] or the spheroidal function [39,40]. The latter is a well-studied
123
+ special function of mathematical physics [42] implemented in Mathematica.
124
+ The case which is amenable to the treatment by both functions [23,31,39]
125
+ makes use of the so-called trigonometric DWP (TDWP). In the previous years
126
128
+ TDWP was applied to numerous objects [23,24,25,31,39,40,43,44]. The aim
129
+ of the present article is to show that TDWP enables one to construct an
130
+ analytically tractable model for PT resonant activation in HB. We exemplify
131
+ the general theory by the analysis of PT rate for intermolecular HB in the
132
+ Zundel ion H5O+
133
+ 2 (oxonium hydrate H2O · · · H · · · OH2 in which the proton is
134
+ equally shared between two water molecules). For the Zundel ion the detailed
135
+ data of IR spectroscopy [20-22] along with the quantum chemical ab initio
136
+ calculations [45,46] are available. As a result the Zundel ion suits well for the
137
+ purpose of demonstrating the capability of our approach to the calculation of
138
+ PT rate in HB. For the Zundel ion the distance between the oxygen atoms ROO
139
+ is not a fixed and predetermined value but can be varied in a wide range. In the
140
+ present article the case ROO = 3.0 A is chosen because it provides sufficiently
141
+ high barrier to exclude the contribution of the over-barrier transition into PT
142
+ rate constant even at high temperature. This choice is in accord with the aim
143
+ of the article to study the effect of an external vibration on the tunneling
144
+ contribution into the rate constant.
145
+ The paper is organized as follows. In the preliminary Sec.2 we remind some
146
+ results of the Weiner’s theory in the form suitable for our analysis. In Sec.3
147
+ we briefly summarize the results of [25] which are necessary for the calculation
148
+ of PT rate in HB. In Sec.4 we derive the expression for the PT rate constant.
149
+ In Sec.5 the results are discussed and the conclusions are summarized. In
150
+ Appendix some technical information is presented.
151
+ 2
152
+ Weiner’s theory
153
+ In the Weiner’s theory [18,19] the proton position is described by the sta-
154
+ tionary one-dimensional SE (which we write in the dimensionless form) with
155
+ symmetric DWP U(x) which has the solutions for the energy levels ǫq and the
156
+ corresponding wave functions ψq(x)
157
+ ψ′′
158
+ q (x) + [ǫq − U(x)] ψq(x) = 0
159
+ (1)
160
+ The rate constant consists of the contribution from the tunneling process and
161
+ that from the over-barrier transition. Concerning the former the Weiner’s the-
162
+ ory deals with two important values. The first one is the probability flux to the
163
+ right of particles in the left well when the particle is in the q-th state Jq. The
164
+ second one is the quantum transmission coefficient, i.e., the fraction of those
165
+ right-moving particles which are transmitted to the right well | Tq |2. Accord-
166
+ ing to [18,19] the reaction rate constant is a result of Boltzmann averaging of
167
169
+ the product Jq | Tq |2 calculated over the doublets
170
+ k = \left[\sum_{q=0}^{\infty} e^{-\beta\epsilon_q}\right]^{-1}\left\{\sum_{n=0}^{N} e^{-\beta\epsilon_{2n}}\, J_{2n}\, |T_{2n}|^2 \;+\; \sum_{m=2N+2}^{\infty} e^{-\beta\epsilon_m}\right\}    (2)
194
+ where n = 0, 1, 2, ..., N , ǫ2n is the energy for the level 2n described by the wave
195
+ function ψ2n(x). In the Weiner’s theory the quantum transmission coefficient
196
+ is calculated for the doublets which are counted by the even energy levels. For
197
+ this reason n is fixed to be even in the first sum in the curly brackets (see the
198
+ text below the formula (3.1) in Sec.III of [19] the formulas from which are used
199
+ in the present article). The first sum in the curly brackets corresponds to the
200
+ contribution due to the tunneling process in the reaction rate. It is over the
201
+ energy levels below the barrier top for which the notions of J2n and | T2n |2
202
+ have sense. In (2) it is suggested by Weiner that the quantum transmission
203
+ coefficient of the lower level in the doublet is determined by the splitting of
204
+ the energy levels in it. Thus N +1 is the number of doublets below the barrier
205
+ top and ǫ2N is the lower energy level in the last doublet in this region. As a
206
+ result only the sum over doublets (i.e., even levels q = 2n) is left. The second
207
+ sum in the curly brackets corresponds to the over-barrier transition and ǫ2N+2
208
+ is the first energy level above the barrier top. The Weiner's theory is based
209
+ on the quasi-classical approximation of the solution of SE [19]
210
+ \psi_{2n}(x) = \frac{B_{2n}}{\sqrt{P_{2n}(x)}}\,\cos\left[\int_0^x d\xi\, P_{2n}(\xi) + S_{2n}\right]    (3)
224
+ for x ≥ 0. Taking into account that for even energy levels the wave function
225
+ is symmetric (\psi'_{2n}(0) = 0) one obtains that
+ \tan S_{2n} = -\frac{P'_{2n}(0)}{2P^2_{2n}(0)}    (4)
232
+ The function Pq(x) satisfies the so-called Milne equation
233
+ P_q^2 + U(x) + \frac{1}{2}\left[\frac{P''_q}{P_q} - \frac{3}{2}\frac{(P'_q)^2}{P_q^2}\right] = \epsilon_q    (5)
251
+ The expression for | T2n |2 follows from (3.5) of [19]
252
+ |T_{2n}|^2 = \frac{\psi^2_{2n}(0)\,P_{2n}(0)}{B^2_{2n}}    (6)
259
+ The expression for J2n is given by (2.14) of [19]
260
+ J_{2n} = \frac{B^2_{2n}}{2}    (7)
264
+ In the particular case P'_{2n}(0) = 0 (which will be pertinent in our further
+ consideration) it follows from (4) that
+ S_{2n} = 0    (8)
269
+ Substitution of the results into (2) yields
270
+ k = \left[\sum_{q=0}^{\infty} e^{-\beta\epsilon_q}\right]^{-1}\left\{\frac{1}{2}\sum_{n=0}^{N} e^{-\beta\epsilon_{2n}}\, B^2_{2n} \;+\; \sum_{m=2N+2}^{\infty} e^{-\beta\epsilon_m}\right\}    (9)
297
+ It is worth noting that in the original Weiner's approach both ǫq and ψq(x)
298
+ are unknown and all efforts are directed to obtain formulas that do not con-
299
+ tain values like ψq(0) or ψ′
300
+ q(0). In contrast for TDWP the exact solution of
301
+ SE ψq(x) as well as the corresponding energy levels ǫq are available and we
302
+ make use of them. Also it should be stressed that the Weiner's theory is
303
+ originally written for the infinite range of the space variable −∞ < x < ∞
304
+ with the requirement | ψq(x) |→ 0 at x → ±∞. In our case of TDWP we
305
+ have the requirement | ψq(x) |→ 0 at x → ±π/2. For this reason we apply the
306
+ corresponding formulas to the case −π/2 ≤ x ≤ π/2. We consider SE (1) in
307
+ this range with the dimensionless form of the symmetric TDWP [39]
308
+ U(x) = \left(m^2 - \frac{1}{4}\right)\tan^2 x - p^2\sin^2 x    (10)
315
+ Here m is an integer number and p is a real number. The two parameters of
316
+ TDWP m and p are related to two main characteristics of the potential energy
317
+ surface, i.e., the barrier height and the barrier width (see Appendix). The
318
+ example of TDWP for intermolecular HB in the Zundel ion with ROO = 3.0 A
319
+ distance between oxygen atoms is presented in Fig.1. For TDWP the exact
320
+ solution of SE is available [39]
321
+ \psi_q(x) = \cos^{1/2}x\;\bar{S}_{m(q+m)}(p;\,\sin x)    (11)
323
+ q = 0, 1, 2, ... and ¯Sm(q+m) (p; s) is the normalized angular prolate spheroidal
324
+ function [42]. It is implemented in Mathematica as SpheroidalPS[(q+m), m, ip, s]
325
+ (note that the latter is a non-normalized one). The energy levels are
326
+ \epsilon_q = \lambda_{m(q+m)}(p) + \frac{1}{2} - m^2 - p^2    (12)
329
+ [Figure 1 appears here: panels of U(x) for the Zundel ion at ROO = 3.0 Å with the calculated energy levels Λ^0_q marked for two frequencies of the external oscillator; see the caption below.]
500
+ Fig. 1. The trigonometric double-well potential (10) at the values of the parameters
501
+ m = 57; p = 76. The parameters are chosen to describe the hydrogen bond in the
502
+ Zundel ion H5O+
503
+ 2 (oxonium hydrate) for the case ROO = 3.0 ˚A (they are extracted
504
+ from the data of quantum chemistry [46]). Several energy levels are indicated for the
505
+ coupling constant α = 0.3. They are given by (19) and calculated at two frequencies
506
+ of the external oscillator (for that of the main peak ωmax ≈ 0.0064 depicted by long
507
+ dashes and for the critical frequency ωc ≈ 0.0112 depicted by short dashes).
508
+ Here λm(q+m) (p) is the spectrum of eigenvalues for ¯Sm(q+m) (p; s). It is imple-
509
+ mented in Mathematica as λm(q+m) (p) ≡ SpheroidalEigenvalue[(q+m), m, ip].
510
+ For TDWP the position of the right minimum is defined by the requirement
511
+ \cos x_{min} = \left(\frac{m^2 - 1/4}{p^2}\right)^{1/4}    (13)
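+ Since both the eigenvalue λm(q+m)(p) and the spheroidal function are Mathematica built-ins, the spectrum (12) and the minimum position (13) can be evaluated directly. The following is only a minimal illustrative sketch (the values m = 57, p = 76 are those quoted in the caption of Fig. 1; Re[] merely discards a negligible numerical imaginary residue coming from the purely imaginary argument ip):
+ (* minimal sketch: TDWP energy levels, Eq. (12), and the well minimum, Eq. (13) *)
+ m = 57; p = 76.;
+ eps[q_] := Re[SpheroidalEigenvalue[q + m, m, I p]] + 1/2 - m^2 - p^2;   (* Eq. (12) *)
+ xmin = N[ArcCos[((m^2 - 1/4)/p^2)^(1/4)]];                              (* Eq. (13) *)
+ Table[eps[q], {q, 0, 7}]   (* neighbouring pairs form the tunneling doublets, cf. Fig. 1 *)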
516
+ 3
517
+ Solution of two-dimensional Schr¨odinger equation with trigono-
518
+ metric double-well potential
519
+ For the two-dimensional SE the wave function is the function of the proton
520
+ coordinate x and that of the oscillator z. The interaction Hamiltonian for
521
+ various types of the mode coupling can be schematically depicted by the form
522
+ αf(x)g(z) where α is the coupling constant. For the symmetric mode coupling
523
+ it is αzx2, for anti-symmetric and squeezed mode couplings it is αzx and αz2x2
524
+ respectively. In the present article we consider the case of the symmetric mode
525
+ coupling (see a comment on other types of interaction in Sec.5). For TDWP
526
+ it is more natural for the mathematical convenience to make for x in the
527
+ interaction term the transformation x → sin x so that the coupling term is
528
+ αz sin2 x. In fact the external vibration always interacts with some function
529
+ f(x) of the proton coordinate x (e.g., with the dipole moment if the
530
+ vibration is produced by an electro-magnetic field). The term αzx2 for the
531
+ symmetric mode coupling means that only the linear approximation for the
532
+ function f(x) ≈ x is taken into account. However the linear approximation can
533
+ be valid within the interval of a sufficiently small x only. In our opinion it is
534
536
+ reasonable to go beyond the linear approximation, i.e., to make the replacing
537
+ x −→ sin x at −π/2 ≤ x ≤ π/2. In the case of the dipole moment it deviates to
+ slower growth than the linear one (see, e.g., Fig. 10.54 in [47]). The necessity to
539
+ go beyond the framework of the linear approximation for HB in the Zundel ion
540
+ was stressed in [22]. To achieve this goal we model such deflection by replacing
541
+ the linear term by the trigonometric one x −→ sin x at −π/2 ≤ x ≤ π/2. Then
542
+ the dimensionless form of the two-dimensional SE with the symmetric mode
543
+ coupling and TDWP is [25]
544
+
545
+ \left[\delta\frac{\partial^2}{\partial z^2} + \frac{\partial^2}{\partial x^2} + \Lambda - \left(m^2 - \frac{1}{4}\right)\tan^2 x + p^2\sin^2 x - \frac{\omega^2 z^2}{2} - \alpha z\sin^2 x\right]\Phi(x,z) = 0    (14)
559
+ Here ω is the frequency of the oscillator coupled to the proton coordinate. The
560
+ dimensionless variables and parameters are discussed in Appendix for the case
561
+ when the oscillator is produced by the heavy atoms stretching mode in HB.
562
+ The solution of (14) in the adiabatic approximation corresponding to the q-th
563
+ state of the particle in TDWP and the j-th state of the oscillator is [25]
564
+ \Phi(x, q, z, j) \approx \varphi^q_j(z)\,\psi_q(x)    (15)
567
+ Here the quantum number q quantizes the states of the particle in TDWP and
568
+ ψq(x) is given by (11). The quantum number j in (15) quantizes the excitation
569
+ states of the oscillator
570
+ \varphi^q_j(z) \approx A\,\exp\left[-\frac{\omega}{2}\left(z + \frac{c_q}{\omega^2}\right)^2\right]\frac{j!(-2)^j}{(2j)!}\,H_{2j}\!\left[\left(\frac{2\omega^2}{\delta}\right)^{1/4}\left(z + \frac{c_q}{\omega^2}\right)\right]    (16)
592
+ j = 0, 1, 2, ... , H_n(x) is the Hermite polynomial and A is a normalization
593
+ constant. Making use of N2.20.16.6 from [48] we obtain
594
+ A^{-2} = \left[\frac{j!(-2)^j}{(2j)!}\right]^2 2^{4j}\,\sqrt{\frac{1}{\omega}}\;\Gamma\!\left(2j+\frac{1}{2}\right)\,{}_2F_1\!\left(-2j,\,-2j,\,\frac{1}{2}-2j;\,-\frac{1}{2}\right)    (17)
613
+ where
614
+ 2F1 (a, b, c; x) is the hypergeometric function. The coefficient cq is
615
+ c_q = \alpha\int_{-1}^{1} d\eta\,\eta^2\left[\bar{S}_{m(q+m)}(p;\eta)\right]^2    (18)
624
+ The energy levels corresponding to (15) are [25]
625
+ \Lambda^j_q \approx \lambda_{m(q+m)}(p) + \frac{1}{2} - m^2 - p^2 - \frac{(c_q)^2}{2\omega^2} + (4j+1)\,\omega\sqrt{\frac{\delta}{2}}    (19)
633
+ 4
634
+ Proton transfer rate constant
635
+ We introduce the dimensionless inverse temperature β (for its expression
636
+ via dimensional parameters of the model see Appendix). Further we restrict
637
+ ourselves to the relatively high temperature range 200 K ≤ T ≤ 400 K
638
+ (0.0345 ≤ β ≤ 0.069) in which the Boltzmann statistics is valid. Then the
639
+ partition function is calculated with the help of the energy levels \Lambda^k_q given by
+ the formula (19)
+ Z(\beta,\omega) = \sum_{q}\sum_{j=0}^{\infty}\exp\left[-\beta\Lambda^j_q(\omega)\right] = \sum_{j=0}^{\infty} e^{-\beta\left[(4j+1)\omega\sqrt{\delta/2} + \frac{1}{2} - m^2 - p^2\right]}\,\sum_{q} e^{-\beta\left[\lambda_{m(q+m)}(p) - \frac{(c_q)^2}{2\omega^2}\right]}    (20)
673
+ With the help of (16) we calculate the average value of z
674
+ \langle z\rangle_q = \int_{-\infty}^{\infty} dz\, z\left[\varphi^q_j(z)\right]^2 = -\frac{c_q}{\omega^2}    (21)
685
+ We define the auxiliary parameter
686
+ \tilde{p}_q = \sqrt{p^2 - \alpha\langle z\rangle_q}    (22)
690
+ and the auxiliary TDWP
691
+ \tilde{U}(x) = \left(m^2 - \frac{1}{4}\right)\tan^2 x - \left(p^2 - \alpha\langle z\rangle_q\right)\sin^2 x    (23)
702
+ We introduce the auxiliary wave function ˜ψq(x) which satisfies SE
703
+ \tilde{\psi}''_q(x) + \left[\lambda_{m(q+m)}(\tilde{p}_q) + \frac{1}{2} - m^2 - \tilde{p}^2_q - \tilde{U}(x)\right]\tilde{\psi}_q(x) = 0    (24)
714
+ Its solution is (11) with the replacement p → \tilde{p}_q = \sqrt{p^2 - \alpha\langle z\rangle_q}:
+ \tilde{\psi}_q(x) = \cos^{1/2}x\;\bar{S}_{m(q+m)}\!\left(\sqrt{p^2 - \alpha\langle z\rangle_q};\,\sin x\right)    (25)
722
+ We seek the solution of (14) in the form
+ \Phi(x, q, z, j) \approx \varphi^q_j(z)\,\frac{B_q}{\sqrt{P_q(x)}}\,\cos\left[\int_0^x d\xi\, P_q(\xi) + S_q\right]    (26)
738
+ We take into account the equation for \varphi^q_j(z) (see the argumentation in [25])
+ \left[\delta\frac{d^2}{dz^2} - \epsilon_q + \Lambda^j_q - \frac{\omega^2 z^2}{2} - c_q z\right]\varphi^q_j(z) = 0    (27)
750
+ Substituting (26) into (14) and replacing z by its average value given by (21)
751
+ (z →< z >q) we obtain the Milne equation for Pq(x)
752
+ P_q^2 + \frac{1}{2}\left[\frac{P''_q}{P_q} - \frac{3}{2}\frac{(P'_q)^2}{P_q^2}\right] = \epsilon_q + c_q\langle z\rangle_q - \tilde{U}(x)    (28)
770
+ We seek its approximate solution in the form
771
+ P_q(x) \approx \frac{D_q}{\tilde{\psi}^2_q(x)}    (29)
775
+ where Dq is a constant to be determined later. It is noteworthy that Pq(x)
776
+ from (29) yields P'_{2n}(0) = 0 because for even energy levels the wave function
+ is symmetric (\tilde{\psi}'_{2n}(0) = 0). Hence (4) yields that S_{2n} = 0 in (26). Substitution
+ of (29) in (28) results in the relationship
+ \frac{D_q^2}{\tilde{\psi}^4_q(x)} = \lambda_{m(q+m)}(p) - \lambda_{m(q+m)}(\tilde{p}_q) + \langle z\rangle_q(c_q - \alpha)    (30)
785
+ We require that the approximate solution of SE (3) (i.e., the corresponding
786
+ term in (26)) coincides with our exact solution (11) for TDWP in the crucial
787
+ points x = 0 and x = xmin. The exact wave function ψq(x) given by (11) is
788
+ a normalized function in the range −π/2 ≤ x ≤ π/2 and its known values
789
+ ψq(0) and ψq(xmin) further replace the corresponding unknown values for the
790
+ approximation (3). The requirement for (30) to be satisfied at x = 0 yields
791
+ D_q = \tilde{\psi}^2_q(0)\,\sqrt{\lambda_{m(q+m)}(p) - \lambda_{m(q+m)}(\tilde{p}_q) + \langle z\rangle_q(c_q - \alpha)}    (31)
798
+ With the help of thus defined P2n(x) we calculate from (3) (with taking into
799
+ account that P'_{2n}(0) = 0, yielding (8)) the wave function at x_{min}
+ \psi_{2n}(x_{min}) = \frac{B_{2n}}{\sqrt{P_{2n}(x_{min})}}\,\cos\left[\int_0^{x_{min}} d\xi\, P_{2n}(\xi)\right]    (32)
815
+ As a result we obtain
816
+ B^2_{2n} = \frac{\psi^2_{2n}(x_{min})\,D_{2n}}{\tilde{\psi}^2_{2n}(x_{min})}\,\cos^{-2}\!\left[D_{2n}\int_0^{x_{min}}\frac{d\xi}{\tilde{\psi}^2_{2n}(\xi)}\right]    (33)
833
+ Then the expression for the two-dimensional generalization of (9) takes the
834
+ form
835
+ k(\beta,\omega) \approx \frac{1}{Z(\beta,\omega)}\Bigg\{\frac{1}{2}\sum_{n}\sum_{j=0}^{\infty}\exp\left[-\beta\Lambda^j_{2n}(\omega)\right]\frac{\psi^2_{2n}(x_{min})\,D_{2n}}{\tilde{\psi}^2_{2n}(x_{min})}\,\cos^{-2}\!\left[D_{2n}\int_0^{x_{min}}\frac{d\xi}{\tilde{\psi}^2_{2n}(\xi)}\right]
+ + \sum_{l=2N+2}^{\infty}\sum_{j=0}^{\infty}\exp\left[-\beta\Lambda^j_l(\omega)\right]\Bigg\}    (34)
877
+ It should be stressed that the summation over j yields the same factor as
878
+ that in the partition function Z(β, ω) and as a result they are canceled out.
879
+ Substituting (25) and (31) in (34) we finally obtain
880
+ k(\beta,\omega) \approx \left[\sum_{q=0}^{\infty} e^{-\beta\left(\lambda_{m(q+m)}(p) - \frac{(c_q)^2}{2\omega^2}\right)}\right]^{-1}\Bigg\{\frac{1}{2}\sum_{n=0}^{N} e^{-\beta\left(\lambda_{m(2n+m)}(p) - \frac{(c_{2n})^2}{2\omega^2}\right)} \times
+ \cos^{-2}\!\left[\sqrt{\lambda_{m(2n+m)}(p) - \lambda_{m(2n+m)}(\tilde{p}_{2n}) + \langle z\rangle_{2n}(c_{2n} - \alpha)}\;\bar{S}^2_{m(2n+m)}(\tilde{p}_{2n};0)\int_0^{x_{min}}\frac{d\xi}{\cos\xi\;\bar{S}^2_{m(2n+m)}(\tilde{p}_{2n};\sin\xi)}\right] \times
+ \frac{\bar{S}^2_{m(2n+m)}(\tilde{p}_{2n};0)\,\bar{S}^2_{m(2n+m)}(p;\sin x_{min})}{\bar{S}^2_{m(2n+m)}(\tilde{p}_{2n};\sin x_{min})}\,\sqrt{\lambda_{m(2n+m)}(p) - \lambda_{m(2n+m)}(\tilde{p}_{2n}) + \langle z\rangle_{2n}(c_{2n} - \alpha)}
+ + \sum_{l=2N+2}^{\infty} e^{-\beta\left(\lambda_{m(l+m)}(p) - \frac{(c_l)^2}{2\omega^2}\right)}\Bigg\}    (35)
+ with the shorthand \tilde{p}_{2n} = \sqrt{p^2 - \alpha\langle z\rangle_{2n}} of (22).
956
+ where < z >2n is given by (21). The sum over n is that over the doublets
957
+ below the barrier top (see the discussion below (2)).
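+ Every ingredient of (35) reduces to SpheroidalEigenvalue, SpheroidalPS and one-dimensional numerical integrals. The sketch below is only an illustration of that structure, not the production code behind Figs. 2-5: it assumes that the normalized function S̄ is obtained by dividing SpheroidalPS by the square root of its norm on [-1, 1], reuses m, p, xmin from the previous sketch, and uses the Sec. 5 truncations N = 2 and Nmax = 7; the high spheroidal orders involved may need raised precision or Chop for small imaginary residues.
+ (* minimal sketch of Eq. (35); m, p, xmin as defined in the previous sketch *)
+ alpha = 0.3; Nlev = 2; Nmax = 7;
+ lam[q_, pp_] := Re[SpheroidalEigenvalue[q + m, m, I pp]];
+ norm[q_, pp_] := NIntegrate[SpheroidalPS[q + m, m, I pp, s]^2, {s, -1, 1}];
+ Sbar[q_, pp_, s_] := SpheroidalPS[q + m, m, I pp, s]/Sqrt[norm[q, pp]];
+ c[q_] := c[q] = alpha NIntegrate[s^2 Sbar[q, p, s]^2, {s, -1, 1}];        (* Eq. (18) *)
+ zav[q_, w_] := -c[q]/w^2;                                                 (* Eq. (21) *)
+ pt[q_, w_] := Sqrt[p^2 - alpha zav[q, w]];                                (* Eq. (22) *)
+ boltz[q_, w_, beta_] := Exp[-beta (lam[q, p] - c[q]^2/(2 w^2))];
+ dq[n_, w_] := Sqrt[lam[2 n, p] - lam[2 n, pt[2 n, w]] + zav[2 n, w] (c[2 n] - alpha)];
+ phase[n_, w_] := dq[n, w] Sbar[2 n, pt[2 n, w], 0]^2 *
+    NIntegrate[1/(Cos[xi] Sbar[2 n, pt[2 n, w], Sin[xi]]^2), {xi, 0, xmin}];
+ term[n_, w_, beta_] := boltz[2 n, w, beta] dq[n, w] Sec[phase[n, w]]^2 *
+    Sbar[2 n, pt[2 n, w], 0]^2 Sbar[2 n, p, Sin[xmin]]^2 /
+    Sbar[2 n, pt[2 n, w], Sin[xmin]]^2;
+ rate[beta_, w_] := (Sum[term[n, w, beta], {n, 0, Nlev}]/2 +
+     Sum[boltz[l, w, beta], {l, 2 Nlev + 2, Nmax}]) /
+    Sum[boltz[q, w, beta], {q, 0, Nmax}];
+ A call such as rate[0.0345, 1.4] then corresponds to the reference frequency and rate[0.0345, 0.0064] to the main-peak region discussed below.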
958
+ 5
959
+ Results and discussion
960
+ Though cumbersome, the formula (35) is easily programmed in
961
+ Mathematica because the crucial elements λm(q+m) (p) and ¯Sm(q+m) (p; s) are
962
+ implemented in this software package. We take the rate constant k(β, ωref)
963
+ for the internal stretching mode of the heavy atoms in the Zundel ion (with
964
+ a fixed frequency ωref) as a natural reference point. For this object there are
965
+ potential energy surfaces for several ROO as a result of quantum chemical
966
+ ab initio calculations [45], [46]. For the cases of HB in the Zundel ion with
967
+ ROO = 2.5 A, ROO = 2.6 A and ROO = 2.7 A the authors of [20] provide the
968
+ estimates of the dimensional coupling constant λ (a21 in their Table.1) as 0.1
969
+ a.u., 0.1 a.u. and 0.05 a.u. respectively. For the dimensional frequency Ω/2
970
+ (a02 in their Table.1) they present the value 0.039 a.u. for all three distances.
971
+ From here we obtain the reference value of α = 0.6 at ROO = 2.5 A and
972
+ α = 0.3 at ROO = 2.7 A. Also from the above results of [20] we obtain that
973
+ the reference value for the dimensionless frequency of O-O stretching mode is
974
+ ωref = 1.4. In the present article we are interested in the effect of the external
975
+ vibration on the tunneling process. To make the contribution into PT rate
976
+ constant from the over-barrier transition to be negligible compared with the
977
+ tunneling one even at T = 400 K (β = 0.0345) we restrict ourselves by the
978
+ case of PT for HB in the Zundel ion with very high barrier. For this reason
979
+ we consider ROO = 3.0 A from data of [46]. TDWP for this case is depicted
980
+ Fig.1. We carry out the parametric analysis of PT rate constant for HB in
981
+ the Zundel ion with large ROO = 3.0 A taking the above mentioned reference
982
+ value α = 0.3 and varying ω in the range 0.005 ≤ ω ≤ ωref. In this case there
983
+ are three doublets below the barrier top (see Fig.1) that means N = 2 in (35).
984
+ In the calculations of the rate constant we also take into account two levels
985
+ above the barrier top (i.e., replace ∞ in (35) by Nmax = 7) to make sure that
986
+ the contribution of the over-barrier transition can be discarded.
987
+ Fig.2 shows that at decreasing the frequency from the reference value ωref
988
+ we obtain the monotonic increase of PT rate constant in agreement with the
989
+ conclusion of [28]. However there is a critical value of the frequency ωc (for
990
+ α = 0.3 this value is ωc ≈ 0.01125) below which a drastic change of the
991
+ behavior takes place. In Fig.3, Fig.4 and Fig.5 the dependence of PT rate
992
+ constant on the frequency of the oscillator at ω < ωc is depicted. For a given
993
+ value of the coupling constant α, at the corresponding ωc there is Λ^0_1 = Λ^0_0 and
996
+ a reversal in the total energy levels takes place (the energy levels of TDWP
997
+ do not depend on ω and retain their natural order ǫq+1 > ǫq). At ω > ωc we
998
+ have the normal sequence Λ^0_{2n+1} > Λ^0_{2n} where n = 0, 1, 2, ... while at ω < ωc
+ [Figure 2 appears here: log10 k(β, ω) vs. log10 ω for β = 0.0345 (T = 400 K) and β = 0.069 (T = 200 K); see the caption below.]
1038
+ Fig. 2. The dependence of proton transfer rate constant on the frequency of the
1039
+ external oscillator above the critical value ω > ωc in the Zundel ion H5O+
1040
+ 2 (oxonium
1041
+ hydrate) with ROO = 3.0 A. The critical frequency is ωc ≈ 0.01125 for the value
1042
+ of the coupling constant α = 0.3 between the proton coordinate and that of the
1043
+ oscillator.
1044
+ [Figure 3 appears here: log10 k(β, ω) vs. log10 ω for β = 0.0345 (T = 400 K), low-resolution view of 0.005 < ω < ωc; see the caption below.]
1062
+ Fig. 3. The dependence of proton transfer rate constant on the frequency of the
1063
+ external oscillator below the critical value ω < ωc in the Zundel ion H5O+
1064
+ 2 (oxonium
1065
+ hydrate) with ROO = 3.0 A at high temperature (T = 400 K (β = 0.0345). The
1066
+ critical frequency is ωc ≈ 0.01125 for the value of the coupling constant α = 0.3
1067
+ between the proton coordinate and that of the oscillator. Low resolution picture of
1068
+ the whole interval 0.005 < ω < ωc.
1069
+ an anomalous picture Λ^0_{2n+1} < Λ^0_{2n} occurs at first for the ground state doublet
1072
+ (n = 0) and at further decrease of the frequency for higher ones below the
1073
+ barrier top (see, e.g., the case for ωmax in Fig.1). This transformation leads to
1074
+ an extraordinary alteration in the behavior of PT rate constant. Fig.3, Fig.4
1075
+ and Fig.5 vividly exhibit that in this case there are very rich manifestations
1076
+ of resonant activation, i.e., the bell-shaped peaks of PT rate enhancement
1077
+ by the external vibration at its symmetric coupling to the proton coordinate.
1078
+ The height of the main peak at ωmax = 0.006429109800232905 is temperature
1079
+ dependent (e.g., k(0.046, ωmax)/k(0.046, ωref) = 5.31 · 1022 at T = 300K and
1080
+ k(0.0345, ωmax)/k(0.0345, ωref) = 3.13 · 1023 at T = 400K). Fig.3 shows that
1081
+ [Figure 4 appears here: log10 k(β, ω) vs. log10 ω for β = 0.0345 (T = 400 K), high-resolution view of the comb-like region 0.00693 < ω < 0.009; see the caption below.]
1130
+ Fig. 4. The dependence of proton transfer rate constant on the frequency of the
1131
+ external oscillator below the critical value ω < ωc in the Zundel ion H5O+
1132
+ 2 (oxonium
1133
+ hydrate) with ROO = 3.0 A at high temperature (T = 400 K (β = 0.0345). The
1134
+ critical frequency is ωc ≈ 0.01125 for the value of the coupling constant α = 0.3
1135
+ between the proton coordinate and that of the oscillator. High resolution picture of
1136
+ the interval 0.00693 < ω < 0.009.
1137
+ [Figure 5 appears here: log10 k(β, ω) vs. log10 ω for β = 0.069 (T = 200 K) below the critical frequency; see the caption below.]
1160
+ Fig. 5. The dependence of proton transfer rate constant on the frequency of the
1161
+ external oscillator below the critical value ω < ωc in the Zundel ion H5O+
1162
+ 2 (oxonium
1163
+ hydrate) with ROO = 3.0 A at low temperature (T = 200 K (β = 0.069). The critical
1164
+ frequency is ωc ≈ 0.01125 for the value of the coupling constant α = 0.3 between
1165
+ the proton coordinate and that of the oscillator.
1166
+ at high temperature β = 0.0345 the approach to the main peak from the side
1167
+ of higher frequencies is not smooth. There is a sequence of comb-like regions of
1168
+ increasing intensity with the decrease of the frequency ω (see Fig.4 for higher
1169
+ resolution picture). The intensity of these combs decreases with the decrease of
1170
+ temperature and at β = 0.069 they are not discernible (see Fig.5). At α = 0.3
+ and ω < ωc the ground state doublet approaches the bottom of TDWP (see,
+ e.g., Fig.1) and at ω < 0.005 the former drops below the latter.
1173
+ By attaining the resonance condition ωmax one can obtain a very efficient
1174
+ mechanism of PT rate enhancement. For α = 0.3 we have the acceleration
1175
+ up to 23 orders of magnitude compared with the reference value ωref = 1.4.
1176
1178
+ The mathematical reason for the phenomenon of such PT resonant activation
+ is in the fact that the function \bar{S}_{m(2n+m)}\!\left(\sqrt{p^2 - \alpha\left(-c_{2n}/\omega^2\right)};\,\sin x_{min}\right)
+ appearing in the denominators of (35) becomes extremely small for the
1189
+ second doublet n = 1 at ω = ωmax. In our opinion the descriptive physical
1190
+ origin of the phenomenon can be revealed from the following empirical obser-
1191
+ vation. Let us consider the wave functions in the left and the right wells for
1192
+ the j-th doublet (j = 1, 2, 3) defined as usual, \psi^{(j)}_R = \frac{1}{\sqrt{2}}\left(\psi^{(j)}_+ - \psi^{(j)}_-\right) and
+ \psi^{(j)}_L = \frac{1}{\sqrt{2}}\left(\psi^{(j)}_+ + \psi^{(j)}_-\right) respectively, where \psi^{(j)}_\pm are given by (11). Here +
1215
+ means the upper energy level in the doublet while − means the lower one.
1216
+ Then we recall the notion of the Rabi frequency in energetic units (multiplied
1217
+ by the Planck constant) as the module of the interaction energy, i.e., that of
1218
+ the product of the electromagnetic field strength and the matrix element of
1219
+ the dipole moment for the transition between the corresponding energy lev-
1220
+ els. The dimensional resonance condition E_f - E_i = \hbar\Omega^{Rabi}_{if} = |H_{int}| in the
+ dimensionless form is \epsilon_f - \epsilon_i = \sqrt{2\delta}\,\omega^{Rabi}_{if} = |h_{int}|. Analogously we equate
1228
+ the difference between the energy levels \epsilon^{(j)}_+ - \epsilon^{(j)}_- in the j-th doublet and the
+ modulus of the matrix element of the interaction energy term |\alpha z \sin^2 x| from
+ (14) with the functions \psi^{(j)}_R and \psi^{(j)}_L. For \sin^2 x we take the value of its matrix
1235
+ element
1236
+ \langle\psi^{(j)}_R|\sin^2 x|\psi^{(j)}_L\rangle = \int_{-\pi/2}^{\pi/2} dx\;\psi^{(j)}_R\,\sin^2 x\;\psi^{(j)}_L    (36)
1246
+ For z we take the average \langle z\rangle_- given by (21), i.e., \langle z\rangle_- = -c^{(j)}_-/\omega^2. As
1248
+ a result we have an empirical relationship
1249
+ \epsilon^{(j)}_+ - \epsilon^{(j)}_- = \frac{\alpha\,\left|-c^{(j)}_-\,\langle\psi^{(j)}_R|\sin^2 x|\psi^{(j)}_L\rangle\right|}{\omega^2}    (37)
1257
+ From (37) we obtain the resonance frequency \omega^{(j)}_\pm
+ \omega^{(j)}_\pm = \sqrt{\frac{\alpha\,\left|-c^{(j)}_-\,\langle\psi^{(j)}_R|\sin^2 x|\psi^{(j)}_L\rangle\right|}{\epsilon^{(j)}_+ - \epsilon^{(j)}_-}}    (38)
1273
+ At α = 0.3 and δ = 1/8 we have for j = 2, i.e., for the second doublet
1274
+ \omega^{(2)}_\pm = 0.00673 which is rather close to ωmax ≈ 0.00643 for the main peak.
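+ With the wave functions (11) this empirical estimate is itself a short computation; the sketch below is illustrative only and reuses m, p, eps from the first sketch and Sbar, c, alpha from the second (in the j-th doublet the lower "-" level is q = 2(j-1) and the upper "+" level is q = 2j-1):
+ (* minimal sketch of Eqs. (36)-(38) *)
+ psi[q_, x_] := Sqrt[Cos[x]] Sbar[q, p, Sin[x]];                       (* Eq. (11) *)
+ psiR[j_, x_] := (psi[2 j - 1, x] - psi[2 (j - 1), x])/Sqrt[2];
+ psiL[j_, x_] := (psi[2 j - 1, x] + psi[2 (j - 1), x])/Sqrt[2];
+ mel[j_] := NIntegrate[psiR[j, x] Sin[x]^2 psiL[j, x], {x, -Pi/2, Pi/2}];  (* Eq. (36) *)
+ wres[j_] := Sqrt[alpha Abs[-c[2 (j - 1)] mel[j]]/(eps[2 j - 1] - eps[2 (j - 1)])];  (* Eq. (38) *)
+ wres[2]   (* the text quotes ~0.00673 for the second doublet *)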
1277
+ In our opinion such quantitative agreement can not be fortuitous. It suggests
1278
+ the physical interpretation of PT resonant activation as an analog of the Rabi
1279
+ transition between the left and the right wells under the influence of the vibra-
1280
+ tion with a suitable frequency applied to the proton in DWP. The case j = 1
1281
1283
+ yields the resonance frequency \omega^{(1)}_\pm = 0.00795 which is within the range of the
+ right comb-like region in Fig.4. Constructing various matrix elements between
+ wave functions of different doublets for both wells, \psi^{in}_R = \frac{1}{\sqrt{2}}(\psi_i - \psi_n) and
+ \psi^{lm}_L = \frac{1}{\sqrt{2}}(\psi_l + \psi_m), yields
1294
+ \epsilon^{(j)}_+ - \epsilon^{(j)}_- = \frac{\alpha\,\left|-c_k\,\langle\psi^{in}_R|\sin^2 x|\psi^{lm}_L\rangle\right|}{\omega^2}    (39)
1301
+ where j = 1, 2, 3 and {k, i, n, l, m = 0, 1, 2, 3, 4, 5}. Then we obtain for the
1302
+ third doublet j = 3 the resonance frequencies: 0.00722 at
1303
+ {l = 2, m = 5, i = 1, k = n = 0}; 0.00725 at {k = l = 0, m = 3, i = 1, n = 4};
1304
+ 0.00753 at
1305
+ {l = 1, k = m = 4, i = 2, n = 5} which are within the range of the left comb-
1306
+ like region in Fig.4. Constructing various matrix elements between wave func-
1307
+ tions of different doublets for the left well yields
1308
+ \epsilon^{(j)}_+ - \epsilon^{(j)}_- = \frac{\alpha\,\left|-c_k\,\langle\psi^{lm}_L|\sin^2 x|\psi^{l'm'}_L\rangle\right|}{\omega^2}    (40)
1316
+ Then we obtain for the third doublet j = 3 the resonance frequencies: 0.00787
1317
+ at
1318
+ {l = 1, k = m = 4, l′ = 5, m′ = 4} which is within the range of the right comb-
1319
+ like region in Fig.4 and 0.00723 at {l = 0, m = 3, l′ = 4, k = m′ = 5} which is
1320
+ within the range of the left comb-like region in Fig.4. In our opinion these
1321
+ numerous resonance frequencies provide qualitative explanation of severe os-
1322
+ cillations in Fig.4.
1323
+ Also in this connection it is worth noting that for the symmetric mode cou-
+ pling (H_{int} = λZX²) the effect of resonant activation results from the term
+ -(c_q)^2/(2\omega^2) in the total energy levels Λ^k_q (19). The energy levels of TDWP ǫ_q
+ are re-normalized due to the coupling of the proton coordinate to the oscilla-
+ tor. For anti-symmetric (H_{int} = λZX) and squeezed (H_{int} = λZ²X²) mode
+ couplings this effect is absent. In the former case the coupling strength is zero,
+ c^{as}_q = 0, due to the symmetry of the wave functions [25], which leads to the
+ actual lack of the crucial term -(c^{as}_q)^2/(2\omega^2) in the formula (19) for Λ^k_q. In
1339
+ the latter case the expression for Λ^k_q [25]
+ \Lambda^k_q\big|_{sq} \approx \lambda_{m(q+m)}(p) + \frac{1}{2} - m^2 - p^2 + (4k+1)\sqrt{\frac{\delta\left(\omega^2 + 2c^{sq}_q\right)}{2}}    (41)
1354
+ does not contain the required term at all. For instance the interaction of the
1355
+ proton in HB with an IR laser field in the dipole approximation (the dipole
1356
+ moment d ∝ x) belongs to the anti-symmetric type and does not fit our
1357
+ requirement for PT resonant activation by a low-frequency vibration (high-
+ frequency Rabi transitions between different doublets stimulated by an IR
+ laser field certainly can considerably interfere with the PT process). Only taking into
1362
+ account that a realistic dipole moment contains the appropriate higher or-
1363
+ der contributions (d ∝ x + const x2 + ...) may provide the required type of
1364
+ interaction in this case.
1365
+ We conclude that the suggested approach enables one to obtain an analytically
1366
+ tractable expression for proton transfer rate constant in a hydrogen bond. It
1367
+ is based on the Schr¨odinger equation with the model Hamiltonian taking into
1368
+ account only the proton coordinate and an external oscillator coupled to it
1369
+ (the heavy atoms stretching mode, a low-frequency vibration of the protein
1370
+ scaffold in an enzyme, etc). The literature data from quantum chemical ab
1371
+ initio calculations of the potential energy surface are transformed into the
1372
+ parameters of the model trigonometric double-well potential. For the two-
1373
+ dimensional Schr¨odinger equation with this potential the analytic solution
1374
+ within the framework of the standard adiabatic approximation is available.
1375
+ The parameters of the model for the Zundel ion in the case of the heavy atoms
1376
+ stretching mode are extracted from the literature data on IR spectroscopy and
1377
+ serve as a reference point. The approach yields the pronounced resonant effect
1378
+ of proton transfer acceleration in some frequency range of the oscillator (below
1379
+ the corresponding critical value of the frequency) at its symmetric coupling
1380
+ to the proton coordinate. The phenomenon is absent for anti-symmetric and
1381
+ squeezed mode couplings.
1382
+ 6
1383
+ Appendix
1384
+ In dimensional units the one-dimensional SE for a quantum particle with the
1385
+ reduced mass M (proton in our case of usual HB or deuterium in the case of
1386
+ a deuterated HB) has the form
1387
+ \frac{d^2\psi(X)}{dX^2} + \frac{2M}{\hbar^2}\left[E - V(X)\right]\psi(X) = 0    (42)
1392
+ where −L ≤ X ≤ L and V (X) is a DWP. The latter is assumed to be infinite
1393
+ at the boundaries of the finite interval for the spatial variable X = ±L. The
1394
+ dimensionless values for the distance x, the potential U(x) and the energy ǫ
1395
+ are introduced as follows
1396
+ x = \frac{\pi X}{2L};\qquad U(x) = \frac{8ML^2}{\hbar^2\pi^2}\,V(X);\qquad \epsilon = \frac{8ML^2 E}{\hbar^2\pi^2}    (43)
+ where −π/2 ≤ x ≤ π/2. As a result we obtain the dimensionless SE (1). In
1406
+ the case of the trigonometric DWP (10) the transformation formulas for the
1407
+ parameters {m, p} into {B, D} (B is the barrier height and D is the barrier
1408
+ width) are [24]
1409
+ p = \frac{\sqrt{B}}{1 - [\cos(D/2)]^2};\qquad m^2 - \frac{1}{4} = \frac{B\,[\cos(D/2)]^4}{\left(1 - [\cos(D/2)]^2\right)^2}    (44)
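+ For illustration, (44) is trivial to evaluate; in the sketch below tdwpParams is a hypothetical helper name, m is rounded to the nearest integer as required by (10), and the sample call merely round-trips the Fig. 1 parameters m = 57, p = 76 as a consistency check:
+ (* minimal sketch of Eq. (44): dimensionless barrier height bigB and width bigD -> {m, p} *)
+ tdwpParams[bigB_, bigD_] := Module[{c2 = Cos[bigD/2]^2},
+   {Round[Sqrt[bigB c2^2/(1 - c2)^2 + 1/4]], Sqrt[bigB]/(1 - c2)}];
+ (* consistency check: B and D reconstructed from m = 57, p = 76 give them back *)
+ With[{m = 57, p = 76.}, With[{c2 = Sqrt[(m^2 - 1/4)/p^2]},
+   tdwpParams[p^2 (1 - c2)^2, 2 ArcCos[Sqrt[c2]]]]]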
1419
+ The Hamiltonian of the two-dimensional SE includes the spatial variable Z
1420
+ (e.g., that for the reduced mass of the heavy atoms in HB which in the case of
1421
+ the Zundel ion is the O-O stretching mode) of the harmonic potential ΩZ2/2
1422
+ with the frequency Ω. We introduce the dimensionless distance x = πX/ (2L)
1423
+ where −π/2 ≤ x ≤ π/2 and dimensionless coordinate z = πZ/ (2L) where
1424
+ −∞ < z < ∞. The dimensionless coupling constant α in (14) for the symmet-
1425
+ ric mode coupling (this case was proved in [20] to be pertinent for the Zundel
1426
+ ion), the dimensionless inverse temperature β in (20), (34) and (35) and the
1427
+ dimensionless frequency ω in (14) are
1428
+ \alpha = \frac{2^6\lambda M L^5}{\hbar^2\pi^5};\qquad \beta = \frac{\hbar^2\pi^2}{8ML^2 k_B T};\qquad \omega = \frac{4\sqrt{2M\mu}\,L^2\Omega}{\hbar\pi^2};\qquad \mu = \frac{M_1 M_2}{M_1 + M_2}    (45)
1441
+ Here λ is a dimensional coupling constant for the case of the symmetric mode
1442
+ coupling term (λZX2) and µ is the reduced mass of the heavy atoms in HB
1443
+ A1 − H · · · A2. In the case of the Zundel ion it is µ = MO/2. As a result δ in
1444
+ (14) is δ = M/µ = 2M/MO. Taking the proton mass M = 1 a.u and that of
1445
+ the oxygen atom MO = 16 a.u. we have δ = 1/8.
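+ For orientation, the conversions (45) are easy to evaluate in SI units; the sketch below is illustrative only, and the numerical inputs L, Omega and T are placeholders rather than values taken from the paper:
+ (* minimal sketch of the dimensionless parameters (45) in SI units *)
+ hbar = 1.054571817*^-34; kB = 1.380649*^-23; mp = 1.67262192*^-27; mO = 16 mp;
+ M = mp; mu = mO/2; delta = M/mu;          (* proton and O-O reduced masses *)
+ L = 0.75*^-10;                            (* half-length of the proton interval, placeholder *)
+ Omega = 2 Pi 9.*^12;                      (* oscillator angular frequency, placeholder *)
+ T = 300;
+ beta = hbar^2 Pi^2/(8 M L^2 kB T);
+ omega = 4 Sqrt[2 M mu] L^2 Omega/(hbar Pi^2);
+ {beta, omega, delta}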
1446
+ Acknowledgements. The author is grateful to Prof. Yu.F. Zuev for helpful dis-
1447
+ cussions. The work was supported from the government assignment for FRC
1448
+ Kazan Scientific Center of RAS.
1449
+ 18
1450
+
1451
+ References
1452
+ [1] N.D. Sokolov, M.V. Vener, Chem.Phys. 168 (1992) 29-40.
1453
+ [2] S.Hammes-Schiffer, J.C. Tully, J.Phys.Chem. 99 (1995) 5193-5191.
1454
+ [3] W.J. Bruno, W. Bialek, Biophys.J. 63 (1992) 689-699.
1455
+ [4] J. Basran, M.J. Sutcliffe, N.S. Scrutton, Biochemistry 38 (1999) 3218-3222.
1456
+ [5] A. Kohen, J.P. Klinman, Chem.Biol. 6 (1999) R191-R198.
1457
+ [6] D. Antoniou, S.D. Schwartz, J.Phys.Chem. B 105 (2001) 5553-5558.
1458
+ [7] K.O. Alper, M. Singla, J.L. Stone, C.K. Bagdassarian, Prot.Sci. 10 (2001) 1319-
1459
+ 1330.
1460
+ [8] P.K. Agarwal, J.Am.Chem.Soc. 127 (2005) 15248-15256.
1461
+ [9] A.E. Sitnitsky, Physica A 371 (2006) 481-491.
1462
+ [10] A.E. Sitnitsky, Physica A 387 (2008) 5483-5497.
1463
+ [11] A. Kohen, Acc.Chem.Res. 48 (2015) 466-473.
1464
+ [12] R. Meyer, R. R. Ernst, J.Chem.Phys. 86 (1987) 784-801.
1465
+ [13] R. Meyer, R. R. Ernst, J.Chem.Phys. 93 (1990) 5518-5532.
1466
+ [14] P.-Y. Yang, J. Cao, J.Phys.Chem.Lett. 12 (2021) 9531-9538.
1467
+ [15] J.F. Triana, F.J. Hern´andez, F. Herrera, J.Chem.Phys. 152 (2020) 234111.
1468
+ [16] A. Mandal, X. Li, P. Huo, J.Chem.Phys. 156 (2022) 014101.
1469
+ [17] A.D. Godbeer, J.S. Al-Khalili, P.D. Stevenson, Phys.Chem.Chem.Phys. 17
1470
+ (2015) 13034-13044.
1471
+ [18] J.H. Weiner, J.Chem.Phys. 68 (1978) 2492-2506.
1472
+ [19] J.H. Weiner, J.Chem.Phys. 69 (1978) 4743-4749.
1473
+ [20] R. Janoschek, E.G. Weidemann, G. Zundel, J.Chem.Soc., Faraday Transactions
1474
+ 2: Mol.Chem.Phys. 69 (1973) 505-520.
1475
+ [21] M.V. Vener, J. Sauer, Chem.Phys.Lett. 312 (1999) 591-597.
1476
+ [22] M.V. Vener, O. K¨uhn, J. Sauer, J.Chem.Phys. 114 (2001) 240-249.
1477
+ [23] A.E. Sitnitsky, Chem.Phys.Lett. 676C (2017) 169-173.
1478
+ [24] A.E. Sitnitsky, Comput.Theor.Chem. 1160 (2019) 19-23.
1479
+ [25] A.E. Sitnitsky, J.Mol.Spectr. 372 (2020) 111347.
1480
+ 19
1481
+
1482
+ [26] D. Ferro-Costas, A. Fern´andez-Ramos, Ch.9 in: Tunnelling in molecules: nuclear
1483
+ quantum effects from bio to physical chemistry, eds. J. K¨astner, S. Kozuch,
1484
+ Royal Society of Chemistry 2021.
1485
+ [27] Z. Smedarchina, W. Siebrand, A. Fern´andez-Ramos, J.Chem.Phys. 148 (2018)
1486
+ 102307.
1487
+ [28] Q. Shi, L. Zhu, L. Chen, J.Chem.Phys. 135 (2011) 044505.
1488
+ [29] V. Jelic, F. Marsiglio, Eur.J.Phys. 33 (2012) 1651-1666.
1489
+ [30] A. Ibrahim, F. Marsiglio, Am.J.Phys. 86 (2018) 180-185.
1490
+ [31] A.E. Sitnitsky, Vibr.Spectrosc. 93 (2017) 36-41.
1491
+ [32] Q. Dong, F.A. Serrano, G.-H. Sun, J. Jing, S.-H. Dong, Adv.High Energy Phys.
1492
+ (2018) 9105825.
1493
+ [33] S. Dong, Q. Dong, G.-H. Sun, S. Femmam, S.-H. Dong, Adv.High Energy Phys.
1494
+ (2018) 5824271.
1495
+ [34] Q. Dong, G.-H. Sun, J. Jing, S.-H. Dong, Phys.Lett. A383 (2019) 270-275.
1496
+ [35] Q. Dong, S.-S. Dong, E. Hern´andez-M´arquez, R. Silva-Ortigoza, G.-H. Sun,
1497
+ S.-H. Dong, Commun.Theor.Phys. 71 (2019) 231-236.
1498
+ [36] Q. Dong, A.J. Torres-Arenas, G.-H. Sun, Camacho-Nieto, S. Femmam, S.-H.
1499
+ Dong, J.Math.Chem. 57 (2019) 1924-1931.
1500
+ [37] Q. Dong, G.-H. Sun, M. Avila Aoki, C.-Y. Chen, S.-H. Dong, Mod.Phys.Lett.
1501
+ A 34 (2019) 1950208.
1502
+ [38] G.-H. Sun, Q. Dong, V.B. Bezerra, S.-H. Dong, J.Math.Chem. 60 (2022) 605-
1503
+ 612.
1504
+ [39] A.E. Sitnitsky, Comput.Theor.Chem. 1138 (2018) 15-22.
1505
+ [40] A.E. Sitnitsky, Comput.Theor.Chem. 1200 (2021) 113220.
1506
+ [41] J. Gamper, F. Kluibenschedl, A.K. H. Weiss, T.S. Hofer,
1507
+ Phys.Chem.Chem.Phys. 24 (2022) 25191.
1508
+ [42] I.V. Komarov, L.I. Ponomarev, S.Yu. Slavaynov, Spheroidal and Coloumb
1509
+ spheroidal functions, Moscow, Science, 1976.
1510
+ [43] C.M. Porto, N.H. Morgon, Comput.Theor.Chem. 1187 (2020) 112917.
1511
+ [44] C.M. Porto, G.A. Barros, L.C. Santana, A.C. Moralles, N.H. Morgon,
1512
+ J.Mol.Model. 28 (2022) 293-301.
1513
+ [45] Q. Yu, J.M. Bowman, J.Phys.Chem.Lett. 7 (2016) 5259-5265.
1514
+ [46] Z.-H. Xu, Atomistic simulations of proton transport in the gas and condensed
1515
+ phases: spectroscopy, reaction kinetics and Grotthuss mechanism, PhD thesis,
1516
+ Basel, 2018.
1517
+ 20
1518
+
1519
+ [47] P. Atkins, J. de Paula, R.Friedman, Quanta, Matter, and Change. A molecular
1520
+ approach to physical chemistry, Freeman, 2009.
1521
+ [48] A.P. Prudnikov, Yu.A. Brychkov, O.I. Marichev, Integrals and series. Special
1522
+ functions., 2-d ed., FIZMATLIT, Moscow, 2003.
1523
+ 21
1524
+
9dE4T4oBgHgl3EQfDQsU/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
9tE4T4oBgHgl3EQf3g1J/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eedf691c0e546d70202de07db7c90bd7678096fccb5b02b49234016377e93525
3
+ size 165732
AtE2T4oBgHgl3EQfRQeF/content/tmp_files/2301.03779v1.pdf.txt ADDED
@@ -0,0 +1,557 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ CIRED workshop on E-mobility and power distribution systems
2
+ Porto, 2-3 June 2022
3
+
4
+ Paper n° 1347
5
+
6
+
7
+ CIRED 2022 Workshop
8
+ 1/4
9
+ ESTIMATING REQUIRED FLEXIBILITY FOR SECURE DISTRIBUTION GRID
10
+ OPERATION CONSIDERING THE UNCERTAINTIES OF EV AND PV
11
+
12
+
13
+
14
+ Manijeh ALIPOUR
15
+ Omid ALIZADEH-MOUSAVI
16
+
17
+ Depsys, Puidoux - Switzerland
18
+ Depsys, Puidoux - Switzerland
19
+
20
21
22
+
23
+ ABSTRACT
24
+ Renewable energy productions and electrification of
25
+ mobility are promising solutions to reduce greenhouse gas
26
+ emissions. Their effective integration in a power grid
27
+ encounters several challenges. The uncertain nature of
28
+ renewable energy productions as well as stochastic
29
+ consumptions of electric vehicles introduce remarkable
30
+ intermittency to a distribution grid and results in bi-
31
+ uncertain characteristics of both supply and demand sides.
32
+ One way to verify the secure grid operation within
33
+ acceptable voltage and loading levels is to assess its
34
+ required flexibility considering possible boundaries of
35
+ uncertain variables. In this paper, first a comprehensive
36
+ linear model of distribution grid considering all pertaining
37
+ constraints is presented. Then, a flexibility estimation
38
+ technique is proposed based on the feasibility study of the
39
+ uncertain space of photovoltaic power productions and
40
+ load
41
+ containing
42
+ electric
43
+ vehicles.
44
+ The
45
+ proposed
46
+ methodology uses grid monitoring data to determine grid
47
+ state and to model uncertain parameters. The model is
48
+ applied on a real low voltage (LV) system equipped with
49
+ grid monitoring devices.
50
+ INTRODUCTION
51
+ The deployment of renewable energy productions has
52
+ many advantages, comprising economic convenience,
53
+ reducing the reliance on fossil fuel markets (especially, gas
54
+ and oil) and environmental friendliness. In addition, the
55
+ wide penetration of renewable energy sources accelerates
56
+ occupation in the EU, by job creation in different ‘green’
57
+ technologies. Aim behind the European Green Deal
58
+ (COM(2019) 640 final) is to be the world’s first climate-
59
+ neutral continent by 2050. In spite of renewable
60
+ generation’s significant advantages, it has the defects of
61
+ uncertainty and fluctuation. Besides to renewable
62
+ generations, electric vehicles as a new variable load
63
+ increase the intermittency of distribution grid and leads to
64
+ bi-uncertain characteristics of both demand and supply
65
+ sides. With the increased penetration of these uncertainty
66
+ sources, the modern power system is confronted with a
67
+ challenge to preserve the reliability, security, and quality
68
+ of supply. Thus, a more flexible distribution grid is
69
+ required.
70
+ The distribution grid flexibility can be defined from
71
+ different perspectives [1]. In [2], the flexibility is specified
72
+ as the degree to which a system can change its electricity
73
+ consumption and production in response to anticipated or
74
+ unanticipated variabilities. The flexibility envelope
75
+ method is presented in [3] to assess the flexibility potential
76
+ of individual power system assets and their aggregation at
77
+ the system level. A flexibility measure is developed in [4]
78
+ which indicates the largest uncertainty deviation that a
79
+ system can bear. In [5] an algorithm is proposed which
80
+ allocates aggregate-level control decisions amongst
81
+ individual
82
+ systems
83
+ economically.
84
+ Concerning
85
+ the
86
+ flexibility estimation of distribution grids, an index is
87
+ proposed in [6] that is related to specific viewpoints such
88
+ as power regulation ability and energy balance ability of
89
+ distribution grids. In [7] the net load uncertainty is
90
+ considered in the flexibility assessment of distribution
91
+ grids.
92
+ The secure grid operation can be verified by assessing the
93
+ required flexibility considering boundaries of uncertain
94
+ variables. A hyper-rectangle is implemented for heat
95
+ exchanger networks [8] to define the multi-dimensional
96
+ region of uncertain variables. In this paper, the flexibility
97
+ and the lack of flexibility are quantified by determining
98
+ acceptable boundaries of uncertain parameters in
99
+ distribution grids.
100
A large portion of uncertainty resources are not monitored by the Distribution System Operator (DSO) and, as a consequence, the level of available flexibility of the grid as well as the lack of flexibility caused by the uncertain resources are not identified. In such a situation, the power flows from unmonitored and uncontrolled resources may cause voltage violations and congestion. Therefore, the monitoring of the grid and the control of resources connected to the grid are vital for the secure operation of the distribution grid.
DSOs around the world are going through the roll-out of smart metering and grid monitoring devices [9, 10]. However, equipping all the nodes of a distribution grid with monitoring devices is practically impossible due to the cost of the required underlying infrastructure. In addition, although the grid topology information is usually available in the DSO’s Geographic Information System (GIS), trustworthy and up-to-date data on LV grid topology and parameters are not available to take decisions for secure grid operation [11]. To face these challenges, the grid sensitivity coefficients of a subset of grid nodes can be used as an approximation of the power flow model. The sensitivity coefficients can be calculated between a plurality of measurement nodes without using the information of grid parameters [13], hereafter called the model-less approach. This approach significantly reduces the required number of monitored nodes, thus reducing the cost of the required grid monitoring infrastructure, and it does not rely on the availability of accurate and up-to-date grid parameters.
+ availability of an accurate and up-to-date grid parameters.
129
+ This paper proposes a flexibility estimation model for
130
+ distribution grids. The main contributions are as follows:
131
+ 1) The proposed flexibility estimation method is based on
132
+ the feasibility study of the uncertain space of load
133
+ containing electric vehicles and photovoltaic powers. It
134
+ allows evaluating the flexibility and lack of flexibility
135
+ based on the coverage of the feasible space to the
136
+
137
+ CIRED CIRED workshop on E-mobility and power distribution systems
138
+ Porto, 2-3 June 2022
139
+
140
+ Paper n° 1347
141
+
142
+
143
+ CIRED 2022 Workshop
144
+ 2/4
145
+ uncertain region.
146
+ 2) The projection of each direction in the uncertain space
147
+ to the feasible space is illustrated and investigated. The
148
+ illustrated projection to the feasible space can assist in
149
+ determining the reason of flexibility inadequacy.
150
+ 3) The linear power flow model based on the sensitivity
151
+ coefficients is used to model the grid operation
152
+ constraints. It properly accounts for the secure grid
153
+ operation within acceptable voltage levels and without
154
+ congestions.
155
+ 4) The proposed model for the calculation of the
156
+ flexibility index is a linear and convex mathematical
157
+ optimization problem that can be solved efficiently
158
+ with non-commercial solvers and the optimality of the
159
+ solution is guaranteed.
160
+ The remaining parts of the paper is structured as following.
161
+ Next section provides the definition of distribution grid
162
+ flexibility. Then, the proposed mathematical formulation
163
+ is presented. The results of applying the method on a real
164
+ LV grid are evaluated. Finally, the conclusions and
165
+ outcomes are discussed.
166
THE FLEXIBILITY OF DISTRIBUTION GRID
In this paper, the flexibility of the distribution grid is described as the capability of the distribution grid to effectively cope with multiple uncertainties of grid operation. Fig. 1 illustrates the structure of the proposed flexibility estimation algorithm in an LV distribution grid. The power grid contains Electric Vehicles (EV), EV charging stations, photovoltaic arrays and commercial load. In the proposed method, grid monitoring data are used to calculate the grid state and the sensitivity coefficients as well as to model the uncertain parameters. Fig. 1 shows how flexibility quantification is realized using the grid monitoring data.

Fig. 1. Proposed monitoring-based flexibility quantification structure (flexibility assessment and quantification at the DSO, fed by real-time data acquisition from monitoring devices on the MV/LV grid).
In fact, the proposed model relies on detailed and accurate information about the resources, obtained from the grid monitoring facilities. Fig. 2 shows the concept of the proposed flexibility quantification method. The uncertain space is calculated using historic data and prediction methods. The margin of the feasible space characterizes the maximum feasible deviation from the expected operation point ‘O’. Any operation point in the uncertain space can be characterized by two components: the direction vector and the normalized variation value. The direction matrix expresses the direction vector, in which each diagonal element indicates an uncertain variable, and the range of each element’s value is [−1, 1] [12]. In order to guarantee that the direction matrix represents a unique direction, at least one of its elements must be set to −1 or 1. This prevents duplicate direction matrices with the same direction, such as diag(1, 0.5) and diag(0.8, 0.4).

Any point outside the feasible region indicates an infeasible grid operation point where the technical constraints of the grid are not satisfied. The minimum feasible deviation direction stands for the critical direction. The boundary point corresponding to the critical direction, shown by ‘C’ in Fig. 2, is the flexibility index, which reveals the adequacy or insufficiency of flexibility resources in the system.

Fig. 2. The concept of the proposed flexibility quantification method (uncertain region spanned by the load P_l and PV generation P_pv between their minimum and maximum values, together with the feasible region, the expected operation point ‘O’ and the critical boundary point ‘C’).
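As a minimal sketch of the uniqueness convention described above, a candidate direction vector can be rescaled so that its largest absolute element equals one; diag(0.8, 0.4) then maps onto the same representative as diag(1, 0.5). The helper name below is assumed for illustration.

```python
import numpy as np

def normalize_direction(d):
    """Rescale a direction vector so that at least one element is -1 or 1.

    This ensures that directions such as (1, 0.5) and (0.8, 0.4) share one
    representative, as required for a unique direction matrix D = diag(d).
    """
    d = np.asarray(d, dtype=float)
    scale = np.max(np.abs(d))
    if scale == 0.0:
        raise ValueError("a zero direction vector has no defined direction")
    return d / scale

# Example: both inputs collapse to array([1. , 0.5])
print(normalize_direction([0.8, 0.4]))
print(normalize_direction([1.0, 0.5]))
```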
MATHEMATICAL FORMULATION
A mathematical optimization problem is formulated to determine the feasible space. For a given direction D, the objective is the maximum feasible variation \beta_D within the feasible operation region, and the flexibility (F) is defined as the minimum value of \beta_D over all directions:

F = \min_{D} \beta_D                                (1)
\beta_D = \max \beta                                 (2)
X = \tilde{X} + \beta D \Delta X                     (3)

where X and \tilde{X} represent the uncertain parameters and their expected values, which are the distribution grid's load and PV generation, and \Delta X denotes the difference between the expected value and the maximum/minimum value of each uncertain parameter. If the flexibility index is equal to one (F = 1), the flexibility resources are sufficient for the secure operation of the grid; equivalently, the feasible region covers the uncertain region. However, if the flexibility index is less than one (F < 1), there is not a sufficient margin for technically secure grid operation and additional flexibility resources are required. The direction matrix D is a diagonal matrix defined as:

D = \mathrm{diag}(d_1, d_2, \dots, d_n)              (4)

where the diagonal element d_i is the direction of the i-th uncertain parameter and n is the number of uncertain parameters.
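To make the structure of (1)–(3) concrete, the sketch below evaluates, for a given direction D, the largest feasible step \beta along X = \tilde{X} + \beta D \Delta X and then takes the minimum over a set of candidate directions. The paper formulates this as a single linear and convex problem; the direction-sampling loop, the simple line search and the `is_feasible` oracle used here are simplifying assumptions for illustration only.

```python
import numpy as np

def beta_along_direction(x_exp, delta_x, d, is_feasible, beta_max=1.0, steps=200):
    """Largest beta in [0, beta_max] such that X = x_exp + beta * d * delta_x
    remains feasible (eq. 3), found by a coarse line search.

    is_feasible : callable taking an operating point X and returning True if all
                  grid constraints (voltages, currents, transformer loading) hold.
    """
    best = 0.0
    for beta in np.linspace(0.0, beta_max, steps):
        if is_feasible(x_exp + beta * d * delta_x):
            best = beta
        else:
            break          # feasible set is convex, so the first violation ends the search
    return best

def flexibility_index(x_exp, delta_x, directions, is_feasible):
    """F = min over candidate directions of beta_D (eqs. 1-2); F = 1 means the
    feasible region covers the sampled uncertain region."""
    return min(beta_along_direction(x_exp, delta_x, d, is_feasible)
               for d in directions)
```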
For the flexibility estimation, a linear power flow model is used based on the sensitivity coefficients calculated using the model-less approach. The formulation is described below.

\Delta V_m = \sum_n [ K^{VP}_{m,n} \Delta P_n + K^{VQ}_{m,n} \Delta Q_n ]     (5)
\Delta I_l = \sum_n [ K^{IP}_{l,n} \Delta P_n + K^{IQ}_{l,n} \Delta Q_n ]     (6)
V_{min} \le V^0_m + \Delta V_m \le V_{max}                                    (7)
-I_{max} \le I^0_l + \Delta I_l \le I_{max}                                   (8)
\Delta V_m = V_m - V^0_m                                                      (9)
\Delta I_l = I_l - I^0_l                                                      (10)
0 \le S_{grid} \le s^{Trafo}_{max}                                            (11)

The voltage and current sensitivity coefficients are used in equations (5) and (6), respectively, to model the impacts of nodal active and reactive power changes (\Delta P_n and \Delta Q_n) on the nodal voltage variations (\Delta V_m) and branch current variations (\Delta I_l). The sensitivity coefficients, i.e., K^{VP}, K^{VQ}, K^{IP} and K^{IQ}, are computed around the grid operation point corresponding to voltage V^0_m and current I^0_l. The voltage level at each node and the branch current should be within the allowed limits, as given in (7) and (8), respectively. The voltage and current deviations are calculated as given by equations (9) and (10). Constraint (11) limits the apparent power flow in the MV/LV transformer (S_{grid}) by the capacity of the transformer (s^{Trafo}_{max}).
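The following is a minimal sketch of how the inner maximization of \beta for one direction could be assembled from the linearized constraints (5)–(8), assuming the sensitivity matrices and the base operating point are available from the monitoring data. The matrix names, the restriction to active-power changes and the use of scipy's linprog are illustrative assumptions rather than the paper's implementation.

```python
import numpy as np
from scipy.optimize import linprog

def max_beta_for_direction(d, dx_p, K_vp, K_ip, v0, i0, v_min, v_max, i_max):
    """Maximize beta subject to the linearized voltage/current limits (7)-(8).

    d      : direction vector over the uncertain nodal active powers (diagonal of D)
    dx_p   : active-power deviation of each uncertain node per unit of beta (Delta X)
    K_vp   : (M, N) voltage sensitivity coefficients, K_ip : (L, N) current ones
    v0, i0 : arrays of base-case node voltages and branch currents
    Reactive-power changes and the transformer limit (11) are omitted for brevity.
    """
    dp = d * dx_p                    # nodal active-power change for beta = 1
    dv = K_vp @ dp                   # eq. (5): voltage change per unit of beta
    di = K_ip @ dp                   # eq. (6): current change per unit of beta

    # Stack the one-variable inequality constraints a * beta <= b from (7) and (8).
    a = np.concatenate([dv, -dv, di, -di])
    b = np.concatenate([v_max - v0, v0 - v_min, i_max - i0, i0 + i_max])

    # linprog minimizes, so minimize -beta with beta restricted to [0, 1].
    res = linprog(c=[-1.0], A_ub=a.reshape(-1, 1), b_ub=b,
                  bounds=[(0.0, 1.0)], method="highs")
    return float(res.x[0]) if res.success else 0.0
```

For the overall index F in (1), this inner problem would be nested inside the minimization over directions, for instance via the direction-handling skeleton sketched in the previous section.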
RESULTS AND DISCUSSIONS
In this section, the performance of the proposed flexibility calculation method is tested and validated on a modified LV grid in Switzerland. The LV grid includes a PV unit with a capacity of 72 kVA, known to the DSO, connected to bus 101. Further, EVSEs are connected to buses 102 and 106. The grid, with several GridEye monitoring devices, is illustrated in Fig. 3. The real-time information of the aggregated loads and productions is provided by the GridEye monitoring devices. Figs. 4 and 5 illustrate the load and PV generation profiles, at 10-minute granularity, used in the simulations. In this work, the DSO's objective is to estimate the distribution grid's flexibility value, the value of flexibility insufficiency and the periods with insufficient flexibility.

Fig. 3. The LV grid topology with grid monitoring devices
Fig. 4. Load profile of LV grid (kW), Nov 28, 2021
Fig. 5. PV generation at node 101 (kW), Nov 28, 2021

Table 1 provides the simulation results for the time slots in which there is insufficient flexibility in the grid, considering the uncertainties of load and PVs. For the remaining time slots, the flexibility value is equal to one, indicating that there is adequate flexibility in the grid. In the time periods from 09:20 to 10:10 and from 17:40 to 18:20 on Nov 28, 2021, the flexibility is less than one due to the high level of load and the thermal capacity of the lines. The uncertain and feasible regions for two sample time slots are depicted in Figs. 6 and 7. The flexibility at time slot 18:00 (Fig. 7) has the lowest value, 0.49. In this time slot, the load level is higher than the average consumption and the PV generation has its lowest value. Moreover, at time slot 09:40 (Fig. 6), the load is at its highest value and the flexibility is 0.74. Although the load is highest in this time slot, the flexibility index is higher than the one at time slot 18:00. The higher level of flexibility is due to the high level of PV generation at this time, which is 20.4 kW.
Table 1. Flexibility results for insecure time slots

Time slot              Flexibility
Nov 28, 2021, 09:20    0.93
Nov 28, 2021, 09:30    0.84
Nov 28, 2021, 09:40    0.74
Nov 28, 2021, 09:50    0.84
Nov 28, 2021, 10:00    0.80
Nov 28, 2021, 10:10    0.80
Nov 28, 2021, 17:40    0.67
Nov 28, 2021, 17:50    0.57
Nov 28, 2021, 18:00    0.49
Nov 28, 2021, 18:10    0.72
Nov 28, 2021, 18:20    0.90

Fig. 6. Feasible and uncertain regions on Nov 28, 2021 at 09:40 (load in kW versus PV generation in kW).
Fig. 7. Feasible and uncertain regions on Nov 28, 2021 at 18:00 (load in kW versus PV generation in kW).
CONCLUSIONS
In this paper, a flexibility estimation methodology is proposed, taking into account the feasible grid operation region and the uncertain region of load containing electric vehicles and photovoltaic powers. The method is applicable using only the information of grid monitoring devices. The simulation results on the LV grid illustrated inadequate flexibility in time slots with a higher level of load and a lower level of PV generation than the average. However, the model can also detect other limiting factors, such as voltage limit violations, among the grid constraints that restrict the grid's flexibility. The outcome of the proposed model informs the grid operator of the time slots with sufficient and insufficient flexibility, along with the value of the insufficiency, considering the uncertainty space.
Acknowledgments
This project is supported by the European Union's Horizon 2020 programme under the Marie Sklodowska-Curie grant agreement no. 101026259.
REFERENCES

[1] North American Electric Reliability Corporation, "Accommodating high levels of variable generation," NERC, 2009.
[2] International Energy Agency, "Harnessing variable renewables: A guide to the balancing challenge," OECD, 2011.
[3] H. Nosair and F. Bouffard, "Flexibility envelopes for power system operational planning," IEEE Transactions on Sustainable Energy, 6(3), pp. 800-809, 2015.
[4] J. Zhao, T. Zheng, and E. Litvinov, "A unified framework for defining and measuring flexibility in power system," IEEE Transactions on Power Systems, 31(1), pp. 339-347, 2015.
[5] F. L. Müller, J. Szabó, O. Sundström, and J. Lygeros, "Aggregation and disaggregation of energetic flexibility from distributed energy resources," IEEE Transactions on Smart Grid, 10(2), pp. 1205-1214, 2017.
[6] F. Chen, C. Huang, L. Wang, C. Zhu, C. Wang, and N. Xie, "Flexibility evaluation of distribution network with high penetration of variable generations," in 2017 IEEE Conference on Energy Internet and Energy System Integration (EI2), pp. 1-6, 2017.
[7] A. Majzoobi and A. Khodaei, "Application of microgrids in supporting distribution grid flexibility," IEEE Transactions on Power Systems, 32(5), pp. 3660-3669, 2016.
[8] J. Li, J. Du, Z. Zhao, et al., "An efficient method for flexibility analysis of large-scale nonconvex heat exchanger networks," Industrial & Engineering Chemistry Research, 54(43), pp. 10757-10767, 2015.
[9] European Commission, "Smart Metering deployment in the European Union," http://bit.ly/2MbDfeL, accessed Feb 2021.
[10] Swiss Federal Office of Energy, "Smart Grids," http://bit.ly/3pB8vS5, accessed Feb 2021.
[11] L. Richaud, R. Pellerej, C. Benoit, and E. Ramos, "Analysis of voltage patterns for topology identification and GIS correction," CIRED 2019.
[12] J. Li, J. Du, Z. Zhao, and P. Yao, "Efficient method for flexibility analysis of large-scale nonconvex heat exchanger networks," Industrial & Engineering Chemistry Research, 54(43), pp. 10757-10767, 2015.
[13] "Method of determining mutual voltage sensitivity coefficients between a plurality of measuring nodes of an electric power network," US patent 0003811, Jan. 2, 2020.
AtE2T4oBgHgl3EQfRQeF/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf,len=206
2
+ page_content='CIRED workshop on E-mobility and power distribution systems Porto, 2-3 June 2022 Paper n° 1347 CIRED 2022 Workshop 1/4 ESTIMATING REQUIRED FLEXIBILITY FOR SECURE DISTRIBUTION GRID OPERATION CONSIDERING THE UNCERTAINTIES OF EV AND PV Manijeh ALIPOUR Omid ALIZADEH-MOUSAVI Depsys, Puidoux - Switzerland Depsys, Puidoux - Switzerland manijeh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
3
+ page_content='alipour@depsys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
4
+ page_content='com omid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
5
+ page_content='mousavi@depsys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
6
+ page_content='com ABSTRACT Renewable energy productions and electrification of mobility are promising solutions to reduce greenhouse gas emissions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
7
+ page_content=' Their effective integration in a power grid encounters several challenges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
8
+ page_content=' The uncertain nature of renewable energy productions as well as stochastic consumptions of electric vehicles introduce remarkable intermittency to a distribution grid and results in bi- uncertain characteristics of both supply and demand sides.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
9
+ page_content=' One way to verify the secure grid operation within acceptable voltage and loading levels is to assess its required flexibility considering possible boundaries of uncertain variables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
10
+ page_content=' In this paper, first a comprehensive linear model of distribution grid considering all pertaining constraints is presented.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
11
+ page_content=' Then, a flexibility estimation technique is proposed based on the feasibility study of the uncertain space of photovoltaic power productions and load containing electric vehicles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
12
+ page_content=' The proposed methodology uses grid monitoring data to determine grid state and to model uncertain parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
13
+ page_content=' The model is applied on a real low voltage (LV) system equipped with grid monitoring devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
14
+ page_content=' INTRODUCTION The deployment of renewable energy productions has many advantages, comprising economic convenience, reducing the reliance on fossil fuel markets (especially, gas and oil) and environmental friendliness.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
15
+ page_content=' In addition, the wide penetration of renewable energy sources accelerates occupation in the EU, by job creation in different ‘green’ technologies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
16
+ page_content=' Aim behind the European Green Deal (COM(2019) 640 final) is to be the world’s first climate- neutral continent by 2050.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
17
+ page_content=' In spite of renewable generation’s significant advantages, it has the defects of uncertainty and fluctuation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
18
+ page_content=' Besides to renewable generations, electric vehicles as a new variable load increase the intermittency of distribution grid and leads to bi-uncertain characteristics of both demand and supply sides.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
19
+ page_content=' With the increased penetration of these uncertainty sources, the modern power system is confronted with a challenge to preserve the reliability, security, and quality of supply.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
20
+ page_content=' Thus, a more flexible distribution grid is required.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
21
+ page_content=' The distribution grid flexibility can be defined from different perspectives [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
22
+ page_content=' In [2], the flexibility is specified as the degree to which a system can change its electricity consumption and production in response to anticipated or unanticipated variabilities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
23
+ page_content=' The flexibility envelope method is presented in [3] to assess the flexibility potential of individual power system assets and their aggregation at the system level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
24
+ page_content=' A flexibility measure is developed in [4] which indicates the largest uncertainty deviation that a system can bear.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
25
+ page_content=' In [5] an algorithm is proposed which allocates aggregate-level control decisions amongst individual systems economically.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
26
+ page_content=' Concerning the flexibility estimation of distribution grids, an index is proposed in [6] that is related to specific viewpoints such as power regulation ability and energy balance ability of distribution grids.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
27
+ page_content=' In [7] the net load uncertainty is considered in the flexibility assessment of distribution grids.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
28
+ page_content=' The secure grid operation can be verified by assessing the required flexibility considering boundaries of uncertain variables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
29
+ page_content=' A hyper-rectangle is implemented for heat exchanger networks [8] to define the multi-dimensional region of uncertain variables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
30
+ page_content=' In this paper, the flexibility and the lack of flexibility are quantified by determining acceptable boundaries of uncertain parameters in distribution grids.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
31
+ page_content=' A large portion of uncertainty resources are not monitored by the Distribution System Operator (DSO) and as a consequence, the level of available flexibility of the grid as well as the lack of flexibility caused by the uncertain resources are not identified.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
32
+ page_content=' In such a situation, the power flows from unmonitored and uncontrolled resources may cause voltage violations and congestions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
33
+ page_content=' Therefore, the monitoring of grid and the control of resources connected to the grid are vital for the secure operation of distribution grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
34
+ page_content=' The DSOs around the world are going through the roll-out of smart metering and grid monitoring devices [9, 10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
35
+ page_content=' However, equipping all the nodes of distribution grid with monitoring devices is practically impossible due to costs of required underlying infrastructure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
36
+ page_content=' In addition, although all the grid topology information is usually available in the DSO’s Geographic Information System (GIS), a trustable and up-to-date data of LV grid topology and parameters is not available to take decisions for the secure grid operation [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
37
+ page_content=' To face these challenges, the grid sensitivity coefficients of a subset of grid nodes can be used as an approximation of the power flow model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
38
+ page_content=' The sensitivity coefficients can be calculated between a plurality of measurement nodes without using the information of grid parameters [13], hereafter called model-less approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
39
+ page_content=' This approach significantly reduces the required number of monitored nodes, thus reducing the cost of required grid monitoring infrastructure, and does not rely on the availability of an accurate and up-to-date grid parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
40
+ page_content=' This paper proposes a flexibility estimation model for distribution grids.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
41
+ page_content=' The main contributions are as follows: 1) The proposed flexibility estimation method is based on the feasibility study of the uncertain space of load containing electric vehicles and photovoltaic powers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
42
+ page_content=' It allows evaluating the flexibility and lack of flexibility based on the coverage of the feasible space to the CIRED CIRED workshop on E-mobility and power distribution systems Porto, 2-3 June 2022 Paper n° 1347 CIRED 2022 Workshop 2/4 uncertain region.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
43
+ page_content=' 2) The projection of each direction in the uncertain space to the feasible space is illustrated and investigated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
44
+ page_content=' The illustrated projection to the feasible space can assist in determining the reason of flexibility inadequacy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
45
+ page_content=' 3) The linear power flow model based on the sensitivity coefficients is used to model the grid operation constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
46
+ page_content=' It properly accounts for the secure grid operation within acceptable voltage levels and without congestions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
47
+ page_content=' 4) The proposed model for the calculation of the flexibility index is a linear and convex mathematical optimization problem that can be solved efficiently with non-commercial solvers and the optimality of the solution is guaranteed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
48
+ page_content=' The remaining parts of the paper is structured as following.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
49
+ page_content=' Next section provides the definition of distribution grid flexibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
50
+ page_content=' Then, the proposed mathematical formulation is presented.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
51
+ page_content=' The results of applying the method on a real LV grid are evaluated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
52
+ page_content=' Finally, the conclusions and outcomes are discussed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
53
+ page_content=' THE FLEXIBILITY OF DISTRIBUTION GRID In this paper, the flexibility of distribution grid is described as the capability of distribution grid to effectively cope with multiple uncertainties of grid operation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
54
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
55
+ page_content=' 1 illustrates the structure of the proposed flexibility estimation algorithm in a LV distribution grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
56
+ page_content=' The power grid contains Electric Vehicles (EV), EV charging stations, photovoltaic arrays and commercial load.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
57
+ page_content=' In the proposed method, grid monitoring data are used to calculate the grid state and the sensitivity coefficients as well as to model uncertain parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
58
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
59
+ page_content=' 1 shows the way that flexibility quantification is realized using the grid monitoring data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
60
+ page_content=' Flexibility assessment and quantification MV/LV DSO Real-time data acquisition Monitoring device Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
61
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
62
+ page_content=' Proposed monitoring based flexibility quantification structure In fact, the proposed model is accomplished by detailed and accurate information of the resources from the grid monitoring facilities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
63
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
64
+ page_content=' 2 shows the concept of proposed flexibility quantification method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
65
+ page_content=' The uncertain space is calculated using historic data and prediction methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
66
+ page_content=' The margin of feasible space characterizes the maximum feasible deviation from the expected operation point ‘O’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
67
+ page_content=' Any operation point in the uncertain space can be characterized by two components: the direction vector and the normalized variation value.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
68
+ page_content=' The direction matrix expresses the direction vector, in which each diagonal element indicates an uncertain variable, and the range of element’s value is [−1, 1] [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
69
+ page_content=' In order to guarantee that direction matrix elements represent a unique direction, at least one of the direction matrix elements must be set to - 1 or 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
70
+ page_content=' This will refrain the duplication of direction matrices with the same direction like diag(1, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
71
+ page_content='5) and diag(0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
72
+ page_content='8, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
73
+ page_content='4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
74
+ page_content=' Any point outside the feasible region indicates an infeasible grid operation point where the technical constraints of the grid are not satisfied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
75
+ page_content=' The minimum feasible deviation direction stands for the critical direction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
76
+ page_content=' The boundary point, shown by ‘C’ in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
77
+ page_content='2, corresponding to the critical direction is the flexibility index which reveals the adequacy or insufficiency of flexibility resources in the system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
78
+ page_content=' O C Feasible Region Max pv P min pv P min lP Max lP Uncertain Region pv P lP pv P lP Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
79
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
80
+ page_content=' The concept of the proposed flexibility quantification method MATHEMATICAL FORMULATION A mathematical optimization problem is formulated to determine the feasible space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
81
+ page_content=' The objective function is the minimum value of maximum feasible variation in the direction D (𝛽𝐷) of feasible operation region.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
82
+ page_content=' In addition, the flexibility (F) is defined as the minimum value of 𝛽𝐷.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
83
+ page_content=' 𝐹 = 𝑚𝑖𝑛𝛽𝐷 (1) 𝛽𝐷 = 𝑚𝑎𝑥(𝛽) (2) 𝑋 = 𝑋̃ + 𝛽𝐷∆𝑋 (3) where 𝑋 and 𝑋̃ represent the uncertain parameter and its expected value which are distribution grid’s load and PV generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
84
+ page_content=' ∆𝑋 indicates the expected value and maximum/minimum value of uncertain parameter’s difference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
85
+ page_content=' If the flexibility index is equal to one (F=1), the flexibility resources are enough for the secure operation of the grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
86
+ page_content=' In addition, it means that feasible region covers the uncertain region.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
87
+ page_content=' However, if the flexibility index is less than one (F <1), there is not sufficient margin for the technically secure grid operation and additional flexibility resources are required.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
88
+ page_content=' The direction matrix D is a diagonal matrix that can be defined as: 𝐷 = [ 𝑑1 𝑑2 ⋱ 𝑑𝑛] (4) CIRED CIRED workshop on E-mobility and power distribution systems Porto, 2-3 June 2022 Paper n° 1347 CIRED 2022 Workshop 3/4 where diagonal element di is the direction on the ith uncertain parameter and n is the number of uncertain parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
89
+ page_content=' For the flexibility estimation a linear power flow model is used based on the sensitivity coefficients calculated using the model-less approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
90
+ page_content=' The formulation is described below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
91
+ page_content=' Δ𝑉𝑚 = ∑ [𝐾𝑚,𝑛 𝑉𝑃 Δ𝑃𝑛 + 𝐾𝑚,𝑛 𝑉𝑄 Δ𝑄𝑛] 𝑛 (5) Δ𝐼𝑙 = ∑ [𝐾𝑙,𝑛 𝐼𝑃Δ𝑃𝑛 + 𝐾𝑙,𝑛 𝐼𝑄Δ𝑄𝑛 ] 𝑛 (6) 𝑉𝑚𝑖𝑛 ≤ 𝑉𝑚 0 + ∆𝑉𝑚 ≤ 𝑉𝑚𝑎𝑥 (7) −𝐼𝑚𝑎𝑥 ≤ 𝐼𝑙 0 + ∆𝐼𝑙 ≤ 𝐼𝑚𝑎𝑥 (8) ∆𝑉𝑚 = 𝑉𝑚 − 𝑉𝑚 0 (9) ∆𝐼𝑙 = 𝐼𝑙 − 𝐼𝑙 0 (10) 0 ≤ 𝑆𝑔𝑟𝑖𝑑 ≤ 𝑠𝑇𝑟𝑎𝑓𝑜 𝑚𝑎𝑥 (11) The voltage and current sensitivity coefficients are used in equations (5) and (6), respectively, to model the impacts of nodal active and reactive power changes (Δ𝑃𝑛 and Δ𝑄𝑛 ) on the nodal voltage variations (Δ𝑉𝑚) and branch current variations (Δ𝐼𝑙).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
92
+ page_content=' The sensitivity coefficients, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
93
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
94
+ page_content=', 𝐾𝑉𝑃, 𝐾𝑉𝑄, 𝐾𝐼𝑃𝑎𝑛𝑑 𝐾𝐼𝑄, are computed around the grid operation point corresponding to voltage 𝑉𝑚 0 and current 𝐼𝑙,𝑡 0 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
95
+ page_content=' The voltage level at each node and the branch current should be within the allowed limits as given in (7) and (8), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
96
+ page_content=' The voltage and current deviations can be calculated as given by equations (9) and (10).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
97
+ page_content=' The constraint (11) limits the apparent power flow in the MV/LV transformer (𝑆𝑔𝑟𝑖𝑑) by the capacity of transformer (���𝑚𝑎𝑥 𝑇𝑟𝑎𝑓𝑜).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
98
+ page_content=' RESULTS AND DISCUSSIONS In this section, the performance of proposed flexibility calculation method is tested and validated in a modified LV grid of Switzerland.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
99
+ page_content=' The LV grid includes a PV unit with known capacity to the DSO, and connected to bus 101 with the capacity of 72 kVA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
100
+ page_content=' Further, EVSEs are connected to the buses 102 and 106.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
101
+ page_content=' The grid with several GridEye monitoring devices is illustrated in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
102
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
103
+ page_content=' The real-time information of the aggregated loads and productions are provided by the GridEye monitoring devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
104
+ page_content=' Figs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
105
+ page_content=' 4 and 5 illustrate the 10-minute granularity of PV generation and load profiles used in the simulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
106
+ page_content=' In this work, the DSO’s objective is to estimate the distribution grid’s flexibility value, the value of flexibility insufficiency and the periods with insufficient flexibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
107
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
108
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
109
+ page_content=' The LV grid topology with grid monitoring devices Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
110
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
111
+ page_content=' Load profile of LV grid Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
112
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
113
+ page_content=' PV generation at node 101 Table 1 provides the simulation results for time slots that there is insufficient flexibility in the grid, considering the uncertainties of load and PVs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
114
+ page_content=' For the remaining time slots, the flexibility value is equal to one indicating that there is adequate flexibility in the grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
115
+ page_content=' At the time periods from 09:20 to 10:10 and from 17:40 to 18:20 on Nov 28, 2021 due to the high level of load and the thermal capacity of lines, the flexibility is less than one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
116
+ page_content=' The uncertain and feasible regions for two sample time slots are depicted in Figs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
117
+ page_content=' 6 and 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
118
+ page_content=' The value of flexibility at time slot 18:00 (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
119
+ page_content=' 7) has the lowest value 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
120
+ page_content='49.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
121
+ page_content=' In this time slot, the load level is higher than the average consumption and the PV generation has the lowest value.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
122
+ page_content=' Moreover, at time slot 09:40 (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
123
+ page_content=' 6), the load is at its highest value and the value of flexibility is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
124
+ page_content='74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
125
+ page_content=' At this time slot, although the load has the highest value, the flexibility index is higher than the one at time slot 18:00.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
126
+ page_content=' The higher level of flexibility is due to the high level of PV generation at this time which is 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
127
+ page_content='4 kW.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
128
+ page_content=' CIRED100 103 104 101 Low voltage grid 105 102 EVSE Grid monitoring device EVSE240- 220 (M)peo 200 180 160- 00:00 06:00 12:00 18:00 Nov28,2021 Time20- 15- PV (kW) 10- 5- 00:00 06:00 12:00 18:00 Nov28,2021 Time CIRED workshop on E-mobility and power distribution systems Porto, 2-3 June 2022 Paper n° 1347 CIRED 2022 Workshop 4/4 Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
129
+ page_content=' Flexibility results for insecure time slots Time slot Flexibility Nov 28, 2021, 09:20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
130
+ page_content='93 Nov 28, 2021, 09:30 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
131
+ page_content='84 Nov 28, 2021, 09:40 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
132
+ page_content='74 Nov 28, 2021, 09:50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
133
+ page_content='84 Nov 28, 2021, 10:00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
134
+ page_content='80 Nov 28, 2021, 10:10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
135
+ page_content='80 Nov 28, 2021, 17:40 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
136
+ page_content='67 Nov 28, 2021, 17:50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
137
+ page_content='57 Nov 28, 2021, 18:00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
138
+ page_content='49 Nov 28, 2021, 18:10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
139
+ page_content='72 Nov 28, 2021, 18:20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
140
+ page_content='90 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
141
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
142
+ page_content=' Feasible and uncertain regions on Nov 28, 2021 at 09:40.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
143
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
144
+ page_content=' 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
145
+ page_content=' Feasible and uncertain regions on Nov 28, 2021 at 18:00.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
146
+ page_content=' CONCLUSIONS In this paper, a flexibility estimation methodology is proposed taking into account the feasible grid operation region and the uncertain region of load containing electric vehicle and photovoltaic powers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
147
+ page_content=' The method is applicable by using the information of grid monitoring devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
148
+ page_content=' The simulation results on the LV grid illustrated the inadequate flexibility in time slots with the higher level of load and the lower level of PV generation than the average level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
149
+ page_content=' However, the model can detect other factors like voltage limit violation regarding the grid constraints that limit the grid’s flexibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
150
+ page_content=' The outcome of proposed model informs the grid operator regarding the time slots with sufficient and insufficient flexibility along with the value of insufficiency considering the uncertainty space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
151
+ page_content=' Acknowledgments This project is supported by the European Union’s Horizon 2020 programme under the Marie Sklodowska-Curie grant agreement no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
152
+ page_content=' 101026259.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtE2T4oBgHgl3EQfRQeF/content/2301.03779v1.pdf'}
REFERENCES
[1] North American Electric Reliability Corporation, "Accommodating high levels of variable generation" (North American Electric Reliability Corp., 2009).
[2] International Energy Agency, "Harnessing variable renewables: A guide to the balancing challenge" (Organisation for Economic Co-operation and Development, 2011).
[3] H. Nosair and F. Bouffard, 2015. "Flexibility envelopes for power system operational planning", IEEE Transactions on Sustainable Energy, 6(3), pp. 800-809.
[4] J. Zhao, T. Zheng, and E. Litvinov, 2015. "A unified framework for defining and measuring flexibility in power system", IEEE Transactions on Power Systems, 31(1), pp. 339-347.
[5] F. L. Müller, J. Szabó, O. Sundström, and J. Lygeros, 2017. "Aggregation and disaggregation of energetic flexibility from distributed energy resources", IEEE Transactions on Smart Grid, 10(2), pp. 1205-1214.
[6] F. Chen, C. Huang, L. Wang, C. Zhu, C. Wang, and N. Xie, November 2017. "Flexibility evaluation of distribution network with high penetration of variable generations", in 2017 IEEE Conference on Energy Internet and Energy System Integration (EI2), pp. 1-6.
[7] A. Majzoobi and A. Khodaei, 2016. "Application of microgrids in supporting distribution grid flexibility", IEEE Transactions on Power Systems, 32(5), pp. 3660-3669.
[8] J. Li, J. Du, Z. Zhao, et al., 2015. "An efficient method for flexibility analysis of large-scale nonconvex heat exchanger networks", Industrial & Engineering Chemistry Research, 54(43), pp. 10757-10767.
[9] European Commission, "Smart Metering deployment in the European Union", http://bit.ly/2MbDfeL, accessed Feb 2021.
[10] Swiss Federal Office of Energy, "Smart Grids", http://bit.ly/3pB8vS5, accessed Feb 2021.
[11] L. Richaud, R. Pellerej, C. Benoit, and E. Ramos, 2019. "Analysis of voltage patterns for topology identification and GIS correction", CIRED 2019.
[12] L. Jilong, J. Du, Z. Zhao, and P. Yao, 2015. "Efficient method for flexibility analysis of large-scale nonconvex heat exchanger networks", Industrial & Engineering Chemistry Research, 54(43), pp. 10757-10767.
[13] "Method of determining mutual voltage sensitivity coefficients between a plurality of measuring nodes of an electric power network", US patent 0003811, Jan. 2, 2020.
[Figure: feasible and uncertain operating regions in the Load (kW) vs. PV (kW) plane]